// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
#include "ttm_object.h"

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif
#include <linux/cc_platform.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
		union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG						\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
		struct drm_vmw_msg_arg)
#define DRM_IOCTL_VMW_MKSSTAT_RESET				\
	DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
#define DRM_IOCTL_VMW_MKSSTAT_ADD				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD,	\
		struct drm_vmw_mksstat_add_arg)
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE,	\
		struct drm_vmw_mksstat_remove_arg)

/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS,
			  vmw_kms_cursor_bypass_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED,
			  vmw_fence_obj_signaled_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
			  DRM_RENDER_ALLOW),

	/* These ioctls allow direct access to the framebuffers; master only. */
	DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl,
			  DRM_MASTER | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK,
			  vmw_present_readback_ioctl,
			  DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT,
			  vmw_kms_update_layout_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER,
			  vmw_shader_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER,
			  vmw_shader_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE,
			  vmw_gb_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF,
			  vmw_gb_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_SYNCCPU,
			  vmw_user_bo_synccpu_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT,
			  vmw_extended_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT,
			  vmw_gb_surface_define_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT,
			  vmw_gb_surface_reference_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MSG,
			  vmw_msg_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET,
			  vmw_mksstat_reset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD,
			  vmw_mksstat_add_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE,
			  vmw_mksstat_remove_ioctl,
			  DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);

struct bitmap_name {
	uint32 value;
	const char *name;
};

static const struct bitmap_name cap1_names[] = {
	{ SVGA_CAP_RECT_COPY, "rect copy" },
	{ SVGA_CAP_CURSOR, "cursor" },
	{ SVGA_CAP_CURSOR_BYPASS, "cursor bypass" },
	{ SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" },
	{ SVGA_CAP_8BIT_EMULATION, "8bit emulation" },
	{ SVGA_CAP_ALPHA_CURSOR, "alpha cursor" },
	{ SVGA_CAP_3D, "3D" },
	{ SVGA_CAP_EXTENDED_FIFO, "extended fifo" },
	{ SVGA_CAP_MULTIMON, "multimon" },
	{ SVGA_CAP_PITCHLOCK, "pitchlock" },
	{ SVGA_CAP_IRQMASK, "irq mask" },
	{ SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" },
	{ SVGA_CAP_GMR, "gmr" },
	{ SVGA_CAP_TRACES, "traces" },
	{ SVGA_CAP_GMR2, "gmr2" },
	{ SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" },
	{ SVGA_CAP_COMMAND_BUFFERS, "command buffers" },
	{ SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" },
	{ SVGA_CAP_GBOBJECTS, "gbobject" },
	{ SVGA_CAP_DX, "dx" },
	{ SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" },
	{ SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" },
	{ SVGA_CAP_CAP2_REGISTER, "cap2 register" },
};

static const struct bitmap_name cap2_names[] = {
	{ SVGA_CAP2_GROW_OTABLE, "grow otable" },
	{ SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" },
	{ SVGA_CAP2_DX2, "dx2" },
	{ SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" },
	{ SVGA_CAP2_SCREENDMA_REG, "screendma reg" },
	{ SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" },
	{ SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" },
	{ SVGA_CAP2_CURSOR_MOB, "cursor mob" },
	{ SVGA_CAP2_MSHINT, "mshint" },
	{ SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" },
	{ SVGA_CAP2_DX3, "dx3" },
	{ SVGA_CAP2_FRAME_TYPE, "frame type" },
	{ SVGA_CAP2_COTABLE_COPY, "cotable copy" },
	{ SVGA_CAP2_TRACE_FULL_FB, "trace full fb" },
	{ SVGA_CAP2_EXTRA_REGS, "extra regs" },
	{ SVGA_CAP2_LO_STAGING, "lo staging" },
};

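/**
 * vmw_print_bitmap - Log the set bits of a capability bitmap by name
 *
 * @drm: Pointer to the drm device.
 * @prefix: String to prepend to the logged names.
 * @bitmap: The bitmap to decode.
 * @bnames: Array mapping individual bit values to human-readable names.
 * @num_names: Number of entries in @bnames.
 *
 * Known bits are logged by name at info level; any bits left over after
 * decoding are reported as unknown at debug level.
 */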
static void vmw_print_bitmap(struct drm_device *drm,
			     const char *prefix, uint32_t bitmap,
			     const struct bitmap_name *bnames,
			     uint32_t num_names)
{
	char buf[512] = "";
	uint32_t i;
	uint32_t offset = 0;

	for (i = 0; i < num_names; ++i) {
		if ((bitmap & bnames[i].value) != 0) {
			offset += snprintf(buf + offset,
					   ARRAY_SIZE(buf) - offset,
					   "%s, ", bnames[i].name);
			bitmap &= ~bnames[i].value;
		}
	}

	drm_info(drm, "%s: %s\n", prefix, buf);
	if (bitmap != 0)
		drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap);
}

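/**
 * vmw_print_sm_type - Log the shader model available to the device
 *
 * @dev_priv: Pointer to a struct vmw_private.
 */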
static void vmw_print_sm_type(struct vmw_private *dev_priv)
{
	static const char *names[] = {
		[VMW_SM_LEGACY] = "Legacy",
		[VMW_SM_4] = "SM4",
		[VMW_SM_4_1] = "SM4_1",
		[VMW_SM_5] = "SM_5",
		[VMW_SM_5_1X] = "SM_5_1X",
		[VMW_SM_MAX] = "Invalid"
	};
	BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
	drm_info(&dev_priv->drm, "Available shader model: %s.\n",
		 names[dev_priv->sm_type]);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_bo *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_SYS,
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.bo_type = ttm_bo_type_kernel,
		.size = PAGE_SIZE,
		.pin = true
	};

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->tbo);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else {
		dev_priv->dummy_query_bo = vbo;
	}

	return ret;
}

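/**
 * vmw_device_init - Startup part of the device init
 *
 * @dev_priv: Pointer to device private.
 *
 * Saves the enable, config-done and traces register states so that
 * vmw_device_fini() can restore them later, enables the device, enables
 * framebuffer traces when command buffers are unsupported, creates the
 * fifo and initializes the fence sequence counters.
 */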
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);

		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}

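/**
 * vmw_device_fini - Teardown part of the device init, reverting
 * vmw_device_init()
 *
 * @vmw: Pointer to device private.
 *
 * Waits for the device to go idle, restores the register states saved
 * by vmw_device_init() and destroys the fifo.
 */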
static void vmw_device_fini(struct vmw_private *vmw)
{
	/*
	 * Legacy sync
	 */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

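/**
 * vmw_request_device - Perform basic device init and command submission setup
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the device, brings up the fence manager and the command
 * buffer manager (falling back to the legacy shader model when the
 * latter is unavailable), performs the late setup and creates the dummy
 * query buffer object.
 */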
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */
	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}

/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */
		width  = VMWGFX_MIN_INITIAL_WIDTH;
		height = VMWGFX_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/*
	 * When running with SEV we always want dma mappings, because
	 * otherwise ttm tt pool pages will bounce through swiotlb running
	 * out of available space.
	 */
	if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	drm_info(&dev_priv->drm,
		 "DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
		drm_info(&dev_priv->drm,
			 "Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

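/**
 * vmw_vram_manager_init - Create the TTM range manager for VRAM
 *
 * @dev_priv: Pointer to device private.
 *
 * The manager is registered for the TTM_PL_VRAM placement and initially
 * marked unused.
 */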
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;

	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
}

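/**
 * vmw_setup_pci_resources - Map the device's PCI resources
 *
 * @dev: Pointer to device private.
 * @pci_id: The PCI device id, either VMWGFX_PCI_ID_SVGA2 or
 * VMWGFX_PCI_ID_SVGA3.
 *
 * SVGA v3 devices expose register MMIO in BAR 0 and VRAM in BAR 2,
 * whereas SVGA v2 devices expose an I/O port in BAR 0, VRAM in BAR 1 and
 * FIFO memory in BAR 2.
 *
 * Returns 0 on success, negative error code on failure.
 */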
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   u32 pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "Register MMIO at %pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			drm_err(&dev->drm,
				"Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);

		if (IS_ERR(dev->fifo_mem)) {
			drm_err(&dev->drm,
				"Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * This is approximate size of the vram, the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	drm_info(&dev->drm,
		 "VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}

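/**
 * vmw_detect_version - Negotiate the SVGA device version
 *
 * @dev: Pointer to device private.
 *
 * Writes the highest SVGA id register value the driver supports and
 * reads it back to verify that the device implements SVGA_ID_2 or
 * SVGA_ID_3.
 *
 * Returns 0 on success, -ENOSYS on an unsupported device version.
 */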
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
			  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		drm_err(&dev->drm,
			"Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			svga_id, dev->pci_id);
		return -ENOSYS;
	}
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	drm_info(&dev->drm,
		 "Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}

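/**
 * vmw_write_driver_id - Report the running driver and kernel to the device
 *
 * @dev: Pointer to device private.
 *
 * On devices advertising SVGA_CAP2_DX2, writes the Linux guest driver id
 * together with the kernel and vmwgfx version numbers to the guest
 * driver registers.
 */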
static void vmw_write_driver_id(struct vmw_private *dev)
{
	if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) {
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
			  SVGA_REG_GUEST_DRIVER_ID_LINUX);

		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1,
			  LINUX_VERSION_MAJOR << 24 |
			  LINUX_VERSION_PATCHLEVEL << 16 |
			  LINUX_VERSION_SUBLEVEL);
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2,
			  VMWGFX_DRIVER_MAJOR << 24 |
			  VMWGFX_DRIVER_MINOR << 16 |
			  VMWGFX_DRIVER_PATCHLEVEL);
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0);

		vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
			  SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
	}
}

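/**
 * vmw_sw_context_init - Initialize the command submission software context
 *
 * @dev_priv: Pointer to device private.
 */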
static void vmw_sw_context_init(struct vmw_private *dev_priv)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;

	hash_init(sw_context->res_ht);
}

static void vmw_sw_context_fini(struct vmw_private *dev_priv)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;

	vfree(sw_context->cmd_bounce);
	if (sw_context->staged_bindings)
		vmw_binding_state_free(sw_context->staged_bindings);
}

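/**
 * vmw_driver_load - Main part of device bringup
 *
 * @dev_priv: Pointer to device private.
 * @pci_id: The PCI device id of the probed device.
 *
 * Maps the PCI resources, negotiates the device version, reads and logs
 * the device capabilities, selects a DMA mapping mode and reads the
 * memory and display limits before setting up the remaining device
 * infrastructure.
 */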
vmw_driver_load(struct vmw_private * dev_priv,u32 pci_id)8538772c0bbSZack Rusin static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
854fb1d9738SJakob Bornecrantz {
855fb1d9738SJakob Bornecrantz 	int ret;
856c0951b79SThomas Hellstrom 	enum vmw_res_type i;
857d92d9851SThomas Hellstrom 	bool refuse_dma = false;
8589703bb32SZack Rusin 	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
859fb1d9738SJakob Bornecrantz 
8608772c0bbSZack Rusin 	dev_priv->drm.dev_private = dev_priv;
8618772c0bbSZack Rusin 
8629e931f2eSMaaz Mombasawala 	vmw_sw_context_init(dev_priv);
8639e931f2eSMaaz Mombasawala 
86468ce556bSZack Rusin 	mutex_init(&dev_priv->cmdbuf_mutex);
86568ce556bSZack Rusin 	mutex_init(&dev_priv->binding_mutex);
86668ce556bSZack Rusin 	spin_lock_init(&dev_priv->resource_lock);
86768ce556bSZack Rusin 	spin_lock_init(&dev_priv->hw_lock);
86868ce556bSZack Rusin 	spin_lock_init(&dev_priv->waiter_lock);
86968ce556bSZack Rusin 	spin_lock_init(&dev_priv->cursor_lock);
87068ce556bSZack Rusin 
8718772c0bbSZack Rusin 	ret = vmw_setup_pci_resources(dev_priv, pci_id);
8728772c0bbSZack Rusin 	if (ret)
8738772c0bbSZack Rusin 		return ret;
8748772c0bbSZack Rusin 	ret = vmw_detect_version(dev_priv);
8758772c0bbSZack Rusin 	if (ret)
87675ec69c7SZack Rusin 		goto out_no_pci_or_version;
8778772c0bbSZack Rusin 
878c0951b79SThomas Hellstrom 
879c0951b79SThomas Hellstrom 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
880aec70c39SDeepak R Varma 		idr_init_base(&dev_priv->res_idr[i], 1);
881c0951b79SThomas Hellstrom 		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
882c0951b79SThomas Hellstrom 	}
883c0951b79SThomas Hellstrom 
884fb1d9738SJakob Bornecrantz 	init_waitqueue_head(&dev_priv->fence_queue);
885fb1d9738SJakob Bornecrantz 	init_waitqueue_head(&dev_priv->fifo_queue);
8864f73a96bSThomas Hellstrom 	dev_priv->fence_queue_waiters = 0;
887d2e8851aSThomas Hellstrom 	dev_priv->fifo_queue_waiters = 0;
888c0951b79SThomas Hellstrom 
8895bb39e81SThomas Hellstrom 	dev_priv->used_memory_size = 0;
890fb1d9738SJakob Bornecrantz 
89104319d89SSinclair Yeh 	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
89204319d89SSinclair Yeh 
893fb1d9738SJakob Bornecrantz 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
894af326e28SZack Rusin 	vmw_print_bitmap(&dev_priv->drm, "Capabilities",
895af326e28SZack Rusin 			 dev_priv->capabilities,
896af326e28SZack Rusin 			 cap1_names, ARRAY_SIZE(cap1_names));
8973b4c2511SNeha Bhende 	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
8983b4c2511SNeha Bhende 		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
899af326e28SZack Rusin 		vmw_print_bitmap(&dev_priv->drm, "Capabilities2",
900af326e28SZack Rusin 				 dev_priv->capabilities2,
901af326e28SZack Rusin 				 cap2_names, ARRAY_SIZE(cap2_names));
9023b4c2511SNeha Bhende 	}
9033b4c2511SNeha Bhende 
90435d86fb6SZack Rusin 	if (!vmwgfx_supported(dev_priv)) {
90535d86fb6SZack Rusin 		vmw_disable_backdoor();
90635d86fb6SZack Rusin 		drm_err_once(&dev_priv->drm,
90735d86fb6SZack Rusin 			     "vmwgfx seems to be running on an unsupported hypervisor.");
90835d86fb6SZack Rusin 		drm_err_once(&dev_priv->drm,
90935d86fb6SZack Rusin 			     "This configuration is likely broken.");
91035d86fb6SZack Rusin 		drm_err_once(&dev_priv->drm,
91135d86fb6SZack Rusin 			     "Please switch to a supported graphics device to avoid problems.");
91235d86fb6SZack Rusin 	}
91335d86fb6SZack Rusin 
914d92d9851SThomas Hellstrom 	ret = vmw_dma_select_mode(dev_priv);
915d92d9851SThomas Hellstrom 	if (unlikely(ret != 0)) {
9162b273544SZack Rusin 		drm_info(&dev_priv->drm,
9172b273544SZack Rusin 			 "Restricting capabilities since DMA not available.\n");
918d92d9851SThomas Hellstrom 		refuse_dma = true;
91981a00960SThomas Hellstrom 		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
9202b273544SZack Rusin 			drm_info(&dev_priv->drm,
9212b273544SZack Rusin 				 "Disabling 3D acceleration.\n");
922d92d9851SThomas Hellstrom 	}
923fb1d9738SJakob Bornecrantz 
9245bb39e81SThomas Hellstrom 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
925be4f77acSZack Rusin 	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
9265bb39e81SThomas Hellstrom 	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
9275bb39e81SThomas Hellstrom 	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
928eb4f923bSJakob Bornecrantz 
929eb4f923bSJakob Bornecrantz 	vmw_get_initial_size(dev_priv);
930eb4f923bSJakob Bornecrantz 
9310d00c488SThomas Hellstrom 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
932fb1d9738SJakob Bornecrantz 		dev_priv->max_gmr_ids =
933fb1d9738SJakob Bornecrantz 			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
934fb17f189SThomas Hellstrom 		dev_priv->max_gmr_pages =
935fb17f189SThomas Hellstrom 			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
936fb17f189SThomas Hellstrom 		dev_priv->memory_size =
937fb17f189SThomas Hellstrom 			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
9385bb39e81SThomas Hellstrom 		dev_priv->memory_size -= dev_priv->vram_size;
9395bb39e81SThomas Hellstrom 	} else {
9405bb39e81SThomas Hellstrom 		/*
9415bb39e81SThomas Hellstrom 		 * An arbitrary limit of 512MiB on surface
9425bb39e81SThomas Hellstrom 		 * memory. But all HWV8 hardware supports GMR2.
9435bb39e81SThomas Hellstrom 		 */
9445bb39e81SThomas Hellstrom 		dev_priv->memory_size = 512*1024*1024;
945fb17f189SThomas Hellstrom 	}
9466da768aaSThomas Hellstrom 	dev_priv->max_mob_pages = 0;
947857aea1cSCharmaine Lee 	dev_priv->max_mob_size = 0;
9486da768aaSThomas Hellstrom 	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
9497ebb47c9SDeepak Rawat 		uint64_t mem_size;
9507ebb47c9SDeepak Rawat 
9517ebb47c9SDeepak Rawat 		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
9527ebb47c9SDeepak Rawat 			mem_size = vmw_read(dev_priv,
9537ebb47c9SDeepak Rawat 					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
9547ebb47c9SDeepak Rawat 		else
9557ebb47c9SDeepak Rawat 			mem_size =
9566da768aaSThomas Hellstrom 				vmw_read(dev_priv,
9576da768aaSThomas Hellstrom 					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
9586da768aaSThomas Hellstrom 
9597c20d213SSinclair Yeh 		/*
9607c20d213SSinclair Yeh 		 * Workaround for low memory 2D VMs to compensate for the
9617c20d213SSinclair Yeh 		 * allocation taken by fbdev
9627c20d213SSinclair Yeh 		 */
9637c20d213SSinclair Yeh 		if (!(dev_priv->capabilities & SVGA_CAP_3D))
964cef75036SSinclair Yeh 			mem_size *= 3;
9657c20d213SSinclair Yeh 
9666da768aaSThomas Hellstrom 		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
967ebc9ac7cSZack Rusin 		dev_priv->max_primary_mem =
968ebc9ac7cSZack Rusin 			vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
969857aea1cSCharmaine Lee 		dev_priv->max_mob_size =
970857aea1cSCharmaine Lee 			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
97135c05125SSinclair Yeh 		dev_priv->stdu_max_width =
97235c05125SSinclair Yeh 			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
97335c05125SSinclair Yeh 		dev_priv->stdu_max_height =
97435c05125SSinclair Yeh 			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
97535c05125SSinclair Yeh 
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->max_primary_mem = dev_priv->vram_size;
	}
	drm_info(&dev_priv->drm,
		 "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
		 (u64)dev_priv->vram_size / 1024,
		 (u64)dev_priv->fifo_mem_size / 1024,
		 dev_priv->memory_size / 1024);

	drm_info(&dev_priv->drm,
		 "MOB limits: max mob size = %u kB, max mob pages = %u\n",
		 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

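	/*
	 * The device imposes no scatter-gather segment-size limit of its
	 * own, so allow segments up to the 4 GiB API maximum.
	 */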
	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		drm_info(&dev_priv->drm,
			 "Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		drm_info(&dev_priv->drm,
			 "Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
	}
	drm_info(&dev_priv->drm,
		 "Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->max_primary_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		drm_err(&dev_priv->drm,
			"Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev_priv);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

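	/*
	 * Bring up the TTM device. Coherent DMA page allocation is only
	 * requested when the map mode selected above requires it.
	 */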
	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      dev_priv->drm.vma_offset_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */

	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	ret = vmw_devcaps_create(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing device caps.\n");
		goto out_no_vram;
	}

	/*
	 * "Guest Memory Regions" is an aperture-like feature with
	 * one slot per bo. There is an upper limit on the number of
	 * slots as well as on the bo size.
	 */
	dev_priv->has_gmr = true;
	/* TODO: This is most likely not correct */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		drm_info(&dev_priv->drm,
			 "No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

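	/*
	 * Memory objects (MOBs) back guest-backed resources; 3D support is
	 * turned off when either the MOB id manager or the MOB page table
	 * memory cannot be set up.
	 */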
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			drm_info(&dev_priv->drm,
				 "No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
		if (vmw_sys_man_init(dev_priv) != 0) {
			drm_info(&dev_priv->drm,
				 "No MOB page table memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

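	/*
	 * Probe the highest supported shader model, stepping through
	 * VMW_SM_4, VMW_SM_4_1, VMW_SM_5 and VMW_SM_5_1X as long as the
	 * required capability bits and devcaps are present.
	 */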
	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
			dev_priv->sm_type = VMW_SM_4;
	}

	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
			dev_priv->sm_type = VMW_SM_4_1;
		if (has_sm4_1_context(dev_priv) &&
		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
				dev_priv->sm_type = VMW_SM_5;
				if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
					dev_priv->sm_type = VMW_SM_5_1X;
			}
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	vmw_print_sm_type(dev_priv);
	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
	vmw_write_driver_id(dev_priv);

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob) {
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
		vmw_sys_man_fini(dev_priv);
	}
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	vmw_sw_context_fini(dev_priv);
	vmw_fifo_resource_dec(dev_priv);

	vmw_svga_disable(dev_priv);

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob) {
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
		vmw_sys_man_fini(dev_priv);
	}
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	vmw_mksstat_remove_all(dev_priv);

	pci_release_regions(pdev);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

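		/*
		 * DRM_VMW_EXECBUF is forwarded without the generic flag
		 * checks below, while DRM_VMW_UPDATE_LAYOUT may change the
		 * display layout and is therefore restricted to the current
		 * master or CAP_SYS_ADMIN.
		 */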
		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
}

bool vmwgfx_supported(struct vmw_private *vmw)
{
#if defined(CONFIG_X86)
	return hypervisor_is_type(X86_HYPER_VMWARE);
#elif defined(CONFIG_ARM64)
	/*
	 * On aarch64 only svga3 is supported
	 */
	return vmw->pci_id == VMWGFX_PCI_ID_SVGA3;
#else
	drm_warn_once(&vmw->drm,
		      "vmwgfx is running on an unknown architecture.");
	return false;
#endif
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

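	/*
	 * Writing SVGA_REG_ENABLE_ENABLE alone turns the device on and
	 * makes it visible; __vmw_svga_disable() pairs it with
	 * SVGA_REG_ENABLE_HIDE to keep the device enabled but hidden.
	 */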
	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}

static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
{
	struct drm_minor *minor = vmw->drm.primary;
	struct dentry *root = minor->debugfs_root;

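	/*
	 * Expose each enabled TTM resource manager under debugfs so memory
	 * pool usage can be inspected at runtime.
	 */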
	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM),
					    root, "system_ttm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
					    root, "vram_ttm");
	if (vmw->has_gmr)
		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
						    root, "gmr_ttm");
	if (vmw->has_mob) {
		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
						    root, "mob_ttm");
		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
						    root, "system_mob_ttm");
	}
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * No user-space processes should be running now.
	 */
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
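	/* Swap out every buffer object before the hibernation image is written. */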
	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
	vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_detect_version(dev_priv);

	vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static const struct drm_driver driver = {
	.driver_features =
	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,
	.gem_prime_import_sg_table = vmw_prime_import_sg_table,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

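	/*
	 * Evict any generic firmware framebuffer driver (e.g. efifb or
	 * vesafb) that may already own the device's aperture before
	 * binding to the PCI device.
	 */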
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (ret)
		goto out_error;

	ret = pcim_enable_device(pdev);
	if (ret)
		goto out_error;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw)) {
		ret = PTR_ERR(vmw);
		goto out_error;
	}

	pci_set_drvdata(pdev, &vmw->drm);

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		goto out_error;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret)
		goto out_unload;

	vmw_fifo_resource_inc(vmw);
	vmw_svga_enable(vmw);
	drm_fbdev_generic_setup(&vmw->drm, 0);

	vmw_debugfs_gem_init(vmw);
	vmw_debugfs_resource_managers_init(vmw);

	return 0;
out_unload:
	vmw_driver_unload(&vmw->drm);
out_error:
	return ret;
}

drm_module_pci_driver(vmw_pci_driver);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");