// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mem_encrypt.h>

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
		union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG						\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
		struct drm_vmw_msg_arg)

/*
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers. Mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_MSG,
		      vmw_msg_ioctl,
		      DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);

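/**
 * vmw_print_capabilities2 - Log the SVGA_CAP2_* flags the device reports.
 *
 * @capabilities2: Value read from the SVGA_REG_CAP2 register.
 */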
static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
	if (capabilities2 & SVGA_CAP2_DX3)
		DRM_INFO("  DX3.\n");
}

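/**
 * vmw_print_capabilities - Log the SVGA_CAP_* flags the device reports.
 *
 * @capabilities: Value read from the SVGA_REG_CAPABILITIES register.
 */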
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_placement, false, true,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early() and is intended to be
 * used by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

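/**
 * vmw_request_device - Bring up the device and command submission.
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the FIFO and fencing, creates the command buffer manager
 * (falling back to legacy FIFO submission if that fails), performs the
 * late device setup and finally creates the dummy query buffer object.
 */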
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against the fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT] constants.
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* TTM currently doesn't fully support SEV encryption. */
	if (mem_encrypt_active())
		return -EINVAL;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

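/*
 * Set up the TTM manager for VRAM: a transparent-hugepage-aware manager
 * when CONFIG_TRANSPARENT_HUGEPAGE is enabled, a plain range manager
 * otherwise. The manager starts out unused; it is switched on later by
 * vmw_svga_enable().
 */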
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}

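/**
 * vmw_setup_pci_resources - Claim and map the device's PCI BARs.
 *
 * @dev: Pointer to device private.
 * @pci_id: PCI device id of the probed device.
 *
 * BAR 0 holds the register I/O space, BAR 1 the VRAM aperture and
 * BAR 2 the FIFO memory, which is mapped write-back here.
 */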
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   unsigned long pci_id)
{
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->io_start = pci_resource_start(pdev, 0);
	dev->vram_start = pci_resource_start(pdev, 1);
	dev->vram_size = pci_resource_len(pdev, 1);
	fifo_start = pci_resource_start(pdev, 2);
	fifo_size = pci_resource_len(pdev, 2);

	DRM_INFO("FIFO at %pa size is %llu kiB\n",
		 &fifo_start, (uint64_t)fifo_size / 1024);
	dev->fifo_mem = devm_memremap(dev->drm.dev,
				      fifo_start,
				      fifo_size,
				      MEMREMAP_WB);

	if (IS_ERR(dev->fifo_mem)) {
		DRM_ERROR("Failed mapping FIFO memory.\n");
		pci_release_regions(pdev);
		return PTR_ERR(dev->fifo_mem);
	}

	/*
	 * This is the approximate size of the VRAM; the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	DRM_INFO("VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}

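/**
 * vmw_detect_version - Negotiate the SVGA device version.
 *
 * @dev: Pointer to device private.
 *
 * Writes SVGA_ID_2 to the SVGA_REG_ID register and reads it back;
 * anything other than SVGA_ID_2 means the device is unsupported.
 */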
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			  svga_id, dev->vmw_chipset);
		return -ENOSYS;
	}
	return 0;
}

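/**
 * vmw_driver_load - Main driver initialization, called from vmw_probe().
 *
 * @dev_priv: Pointer to device private.
 * @pci_id: PCI device id of the probed device.
 *
 * Maps the PCI resources, negotiates the device version, reads out the
 * capability registers and memory sizes, and brings up TTM, interrupts,
 * fencing, the memory managers, KMS and command submission, undoing
 * everything in reverse order on failure.
 */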
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->vmw_chipset = pci_id;
	dev_priv->last_read_seqno = (uint32_t) -100;
	dev_priv->drm.dev_private = dev_priv;

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->binding_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;


	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;


	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}


	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			DRM_INFO("Disabling 3D acceleration.\n");
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low-memory 2D VMs to compensate for the
		 * allocation taken by fbdev.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->prim_bb_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
				 dev_priv->drm.dev,
				 dev_priv->drm.anon_inode->i_mapping,
				 &dev_priv->vma_manager,
				 dev_priv->map_mode == vmw_dma_alloc_coherent,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */

	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	/*
	 * "Guest Memory Regions" is an aperture-like feature with
	 *  one slot per bo. There is an upper limit on the number of
	 *  slots as well as on the bo size.
	 */
	dev_priv->has_gmr = true;
	/* TODO: This is most likely not correct */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. 3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4;
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);

	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);

		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4_1;

		if (has_sm4_1_context(dev_priv) &&
		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
			if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
				dev_priv->sm_type = VMW_SM_5;
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC)
		 ? "yes." : "no.");
	if (dev_priv->sm_type == VMW_SM_5)
		DRM_INFO("SM5 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4_1)
		DRM_INFO("SM4_1 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4)
		DRM_INFO("SM4 support available.\n");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}

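/**
 * vmw_driver_unload - Tear down everything set up by vmw_driver_load(),
 * in roughly the reverse order.
 *
 * @dev: Pointer to the drm device.
 */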
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	vmw_vram_manager_fini(dev_priv);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	pci_release_regions(pdev);
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

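/**
 * vmw_driver_open - Set up the per-file-descriptor driver state.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The drm file this open is for.
 *
 * Allocates a struct vmw_fpriv and its TTM object file, which holds
 * the references to objects created through this file.
 */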
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

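/*
 * Common ioctl dispatch for the native and compat entry points. Driver
 * private ioctls get extra checking here: VMW_EXECBUF skips the command
 * encoding check since its argument struct exists in several versioned
 * sizes, VMW_UPDATE_LAYOUT requires master or CAP_SYS_ADMIN, and all
 * other private ioctls must match the encoding in the ioctl table.
 */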
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}

static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
		      unsigned long len, unsigned long pgoff,
		      unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &dev_priv->vma_manager);
}

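/*
 * PM notifier: take the reservation semaphore in write mode when
 * hibernation is being prepared, so that no buffer objects remain
 * reserved, and release it again once the image has been restored or
 * hibernation was aborted.
 */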
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

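/**
 * vmw_pm_freeze - Prepare the device for hibernation.
 *
 * @kdev: Pointer to the struct device.
 *
 * Suspends modesetting, evicts resources, swaps out buffer objects and
 * takes down command submission. Fails with -EBUSY, undoing the partial
 * freeze, if 3D resources are still active.
 */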
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	while (ttm_bo_swapout(&ctx) == 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
	.get_unmapped_area = vmw_get_unmapped_area,
};

static const struct drm_driver driver = {
	.driver_features =
	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

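/*
 * PCI probe callback: kick out conflicting framebuffer drivers, enable
 * the PCI device, allocate the drm device embedded in struct vmw_private,
 * and load and register the driver.
 */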
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
	if (ret)
		return ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw))
		return PTR_ERR(vmw);

	vmw->drm.pdev = pdev;
	pci_set_drvdata(pdev, &vmw->drm);

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		return ret;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret) {
		vmw_driver_unload(&vmw->drm);
		return ret;
	}

	return 0;
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");