// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
#include "ttm_object.h"

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif
#include <linux/cc_platform.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
		 union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
		 struct drm_vmw_msg_arg)
#define DRM_IOCTL_VMW_MKSSTAT_RESET \
	DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
#define DRM_IOCTL_VMW_MKSSTAT_ADD \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \
		 struct drm_vmw_mksstat_add_arg)
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \
		struct drm_vmw_mksstat_remove_arg)

/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS,
			  vmw_kms_cursor_bypass_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED,
			  vmw_fence_obj_signaled_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
			  DRM_RENDER_ALLOW),

	/* These ioctls allow direct access to the framebuffers; mark as master only. */
	DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl,
			  DRM_MASTER | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK,
			  vmw_present_readback_ioctl,
			  DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT,
			  vmw_kms_update_layout_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER,
			  vmw_shader_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER,
			  vmw_shader_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE,
			  vmw_gb_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF,
			  vmw_gb_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_SYNCCPU,
			  vmw_user_bo_synccpu_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT,
			  vmw_extended_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT,
			  vmw_gb_surface_define_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT,
			  vmw_gb_surface_reference_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MSG,
			  vmw_msg_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET,
			  vmw_mksstat_reset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD,
			  vmw_mksstat_add_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE,
			  vmw_mksstat_remove_ioctl,
			  DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


struct bitmap_name {
	uint32 value;
	const char *name;
};

static const struct bitmap_name cap1_names[] = {
	{ SVGA_CAP_RECT_COPY, "rect copy" },
	{ SVGA_CAP_CURSOR, "cursor" },
	{ SVGA_CAP_CURSOR_BYPASS, "cursor bypass" },
	{ SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" },
	{ SVGA_CAP_8BIT_EMULATION, "8bit emulation" },
	{ SVGA_CAP_ALPHA_CURSOR, "alpha cursor" },
	{ SVGA_CAP_3D, "3D" },
	{ SVGA_CAP_EXTENDED_FIFO, "extended fifo" },
	{ SVGA_CAP_MULTIMON, "multimon" },
	{ SVGA_CAP_PITCHLOCK, "pitchlock" },
	{ SVGA_CAP_IRQMASK, "irq mask" },
	{ SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" },
	{ SVGA_CAP_GMR, "gmr" },
	{ SVGA_CAP_TRACES, "traces" },
	{ SVGA_CAP_GMR2, "gmr2" },
	{ SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" },
	{ SVGA_CAP_COMMAND_BUFFERS, "command buffers" },
	{ SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" },
	{ SVGA_CAP_GBOBJECTS, "gbobject" },
	{ SVGA_CAP_DX, "dx" },
	{ SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" },
	{ SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" },
	{ SVGA_CAP_CAP2_REGISTER, "cap2 register" },
};

static const struct bitmap_name cap2_names[] = {
	{ SVGA_CAP2_GROW_OTABLE, "grow otable" },
	{ SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" },
	{ SVGA_CAP2_DX2, "dx2" },
	{ SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" },
	{ SVGA_CAP2_SCREENDMA_REG, "screendma reg" },
	{ SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" },
	{ SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" },
	{ SVGA_CAP2_CURSOR_MOB, "cursor mob" },
	{ SVGA_CAP2_MSHINT, "mshint" },
	{ SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" },
	{ SVGA_CAP2_DX3, "dx3" },
	{ SVGA_CAP2_FRAME_TYPE, "frame type" },
	{ SVGA_CAP2_COTABLE_COPY, "cotable copy" },
	{ SVGA_CAP2_TRACE_FULL_FB, "trace full fb" },
	{ SVGA_CAP2_EXTRA_REGS, "extra regs" },
	{ SVGA_CAP2_LO_STAGING, "lo staging" },
};

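/**
 * vmw_print_bitmap - Log the names of the bits set in a capability bitmap
 *
 * @drm: Pointer to the drm device.
 * @prefix: String printed in front of the decoded bitmap.
 * @bitmap: The bitmap to decode.
 * @bnames: Array mapping individual bits to human-readable names.
 * @num_names: Number of entries in @bnames.
 *
 * Bits without an entry in @bnames are reported separately as unknown
 * enums at debug log level.
 */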
static void vmw_print_bitmap(struct drm_device *drm,
			     const char *prefix, uint32_t bitmap,
			     const struct bitmap_name *bnames,
			     uint32_t num_names)
{
	char buf[512];
	uint32_t i;
	uint32_t offset = 0;
	for (i = 0; i < num_names; ++i) {
		if ((bitmap & bnames[i].value) != 0) {
			offset += snprintf(buf + offset,
					   ARRAY_SIZE(buf) - offset,
					   "%s, ", bnames[i].name);
			bitmap &= ~bnames[i].value;
		}
	}

	drm_info(drm, "%s: %s\n", prefix, buf);
	if (bitmap != 0)
		drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap);
}

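/**
 * vmw_print_sm_type - Log the shader model available on this device
 *
 * @dev_priv: Pointer to device private.
 */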
static void vmw_print_sm_type(struct vmw_private *dev_priv)
{
	static const char *names[] = {
		[VMW_SM_LEGACY] = "Legacy",
		[VMW_SM_4] = "SM4",
		[VMW_SM_4_1] = "SM4_1",
		[VMW_SM_5] = "SM_5",
		[VMW_SM_5_1X] = "SM_5_1X",
		[VMW_SM_MAX] = "Invalid"
	};
	BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
	drm_info(&dev_priv->drm, "Available shader model: %s.\n",
		 names[dev_priv->sm_type]);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_bo *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_SYS,
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.bo_type = ttm_bo_type_kernel,
		.size = PAGE_SIZE,
		.pin = true,
		.keep_resv = true,
	};

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->tbo);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

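/**
 * vmw_device_init - Start the SVGA device
 *
 * @dev_priv: Pointer to device private.
 *
 * Saves the enable, config-done and traces register state so that
 * vmw_device_fini() can restore it, enables the device, and sets up
 * command submission by creating the fifo. Returns 0 on success or a
 * negative error code on failure.
 */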
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);

		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}

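/**
 * vmw_device_fini - Stop the SVGA device
 *
 * @vmw: Pointer to device private.
 *
 * Syncs with the device, restores the register state saved by
 * vmw_device_init() and destroys the fifo.
 */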
static void vmw_device_fini(struct vmw_private *vmw)
{
	/*
	 * Legacy sync
	 */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

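/**
 * vmw_request_device - Perform basic device and command submission setup
 *
 * @dev_priv: Pointer to device private.
 *
 * Starts the device, brings up the fence fifo and the command buffer
 * manager, performs the late setup (otables and the large command
 * buffer pool) and creates the dummy query buffer object. On failure,
 * the already initialized parts are torn down in reverse order.
 */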
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}

/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMWGFX_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMWGFX_MIN_INITIAL_WIDTH;
		height = VMWGFX_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/*
	 * When running with SEV we always want dma mappings, because
	 * otherwise ttm tt pool pages will bounce through swiotlb running
	 * out of available space.
	 */
	if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	drm_info(&dev_priv->drm,
		 "DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
		drm_info(&dev_priv->drm,
			 "Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

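/**
 * vmw_vram_manager_init - Create the TTM range manager for VRAM
 *
 * @dev_priv: Pointer to device private.
 *
 * The manager starts out marked unused; it is only put to use once
 * SVGA is enabled and unhidden.
 */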
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
}

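/**
 * vmw_setup_pci_resources - Map the device's PCI resources
 *
 * @dev: Pointer to device private.
 * @pci_id: PCI device id, either VMWGFX_PCI_ID_SVGA2 or VMWGFX_PCI_ID_SVGA3.
 *
 * On SVGA3 devices BAR0 holds the register MMIO area and BAR2 the VRAM;
 * on SVGA2 devices BAR0 is the I/O port base, BAR1 the VRAM and BAR2
 * the FIFO memory. Returns 0 on success or a negative error code.
 */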
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   u32 pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "Register MMIO at 0x%pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			drm_err(&dev->drm,
				"Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);

		if (IS_ERR(dev->fifo_mem)) {
			drm_err(&dev->drm,
				"Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * This is the approximate size of the vram; the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	drm_info(&dev->drm,
		 "VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}

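/**
 * vmw_detect_version - Negotiate the SVGA device version
 *
 * @dev: Pointer to device private.
 *
 * Writes the highest id the driver supports (SVGA_ID_3 on SVGA3
 * devices, otherwise SVGA_ID_2) and reads back the id the device
 * accepts. Returns -ENOSYS for unsupported ids.
 */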
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
		  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		drm_err(&dev->drm,
			"Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			svga_id, dev->pci_id);
		return -ENOSYS;
	}
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	drm_info(&dev->drm,
		 "Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}

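/**
 * vmw_write_driver_id - Report the guest driver and kernel version
 *
 * @dev: Pointer to device private.
 *
 * The guest driver id registers are only written when the device
 * exposes SVGA_CAP2_DX2.
 */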
static void vmw_write_driver_id(struct vmw_private *dev)
{
	if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) {
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
			  SVGA_REG_GUEST_DRIVER_ID_LINUX);

		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1,
			  LINUX_VERSION_MAJOR << 24 |
			  LINUX_VERSION_PATCHLEVEL << 16 |
			  LINUX_VERSION_SUBLEVEL);
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2,
			  VMWGFX_DRIVER_MAJOR << 24 |
			  VMWGFX_DRIVER_MINOR << 16 |
			  VMWGFX_DRIVER_PATCHLEVEL);
		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0);

		vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
			  SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
	}
}

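/**
 * vmw_sw_context_init - Initialize the per-device software context
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the resource hash table used during command submission.
 */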
static void vmw_sw_context_init(struct vmw_private *dev_priv)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;

	hash_init(sw_context->res_ht);
}

static void vmw_sw_context_fini(struct vmw_private *dev_priv)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;

	vfree(sw_context->cmd_bounce);
	if (sw_context->staged_bindings)
		vmw_binding_state_free(sw_context->staged_bindings);
}

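/**
 * vmw_driver_load - Initialize the driver at load time
 *
 * @dev_priv: Pointer to device private.
 * @pci_id: The PCI device id of the probed device.
 *
 * Sets up locks and the software context, maps the PCI resources,
 * negotiates the device version, reads and logs the device
 * capabilities and limits, and initializes the TTM, fence, irq and
 * device caps infrastructure used by the rest of the driver.
 */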
vmw_driver_load(struct vmw_private * dev_priv,u32 pci_id)8508772c0bbSZack Rusin static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
851fb1d9738SJakob Bornecrantz {
852fb1d9738SJakob Bornecrantz int ret;
853c0951b79SThomas Hellstrom enum vmw_res_type i;
854d92d9851SThomas Hellstrom bool refuse_dma = false;
8559703bb32SZack Rusin struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
856fb1d9738SJakob Bornecrantz
8578772c0bbSZack Rusin dev_priv->drm.dev_private = dev_priv;
8588772c0bbSZack Rusin
8599e931f2eSMaaz Mombasawala vmw_sw_context_init(dev_priv);
8609e931f2eSMaaz Mombasawala
86168ce556bSZack Rusin mutex_init(&dev_priv->cmdbuf_mutex);
86268ce556bSZack Rusin mutex_init(&dev_priv->binding_mutex);
86368ce556bSZack Rusin spin_lock_init(&dev_priv->resource_lock);
86468ce556bSZack Rusin spin_lock_init(&dev_priv->hw_lock);
86568ce556bSZack Rusin spin_lock_init(&dev_priv->waiter_lock);
86668ce556bSZack Rusin spin_lock_init(&dev_priv->cursor_lock);
86768ce556bSZack Rusin
8688772c0bbSZack Rusin ret = vmw_setup_pci_resources(dev_priv, pci_id);
8698772c0bbSZack Rusin if (ret)
8708772c0bbSZack Rusin return ret;
8718772c0bbSZack Rusin ret = vmw_detect_version(dev_priv);
8728772c0bbSZack Rusin if (ret)
87375ec69c7SZack Rusin goto out_no_pci_or_version;
8748772c0bbSZack Rusin
875c0951b79SThomas Hellstrom
876c0951b79SThomas Hellstrom for (i = vmw_res_context; i < vmw_res_max; ++i) {
877aec70c39SDeepak R Varma idr_init_base(&dev_priv->res_idr[i], 1);
878c0951b79SThomas Hellstrom INIT_LIST_HEAD(&dev_priv->res_lru[i]);
879c0951b79SThomas Hellstrom }
880c0951b79SThomas Hellstrom
881fb1d9738SJakob Bornecrantz init_waitqueue_head(&dev_priv->fence_queue);
882fb1d9738SJakob Bornecrantz init_waitqueue_head(&dev_priv->fifo_queue);
8834f73a96bSThomas Hellstrom dev_priv->fence_queue_waiters = 0;
884d2e8851aSThomas Hellstrom dev_priv->fifo_queue_waiters = 0;
885c0951b79SThomas Hellstrom
8865bb39e81SThomas Hellstrom dev_priv->used_memory_size = 0;
887fb1d9738SJakob Bornecrantz
88804319d89SSinclair Yeh dev_priv->assume_16bpp = !!vmw_assume_16bpp;
88904319d89SSinclair Yeh
890fb1d9738SJakob Bornecrantz dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
891af326e28SZack Rusin vmw_print_bitmap(&dev_priv->drm, "Capabilities",
892af326e28SZack Rusin dev_priv->capabilities,
893af326e28SZack Rusin cap1_names, ARRAY_SIZE(cap1_names));
8943b4c2511SNeha Bhende if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
8953b4c2511SNeha Bhende dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
896af326e28SZack Rusin vmw_print_bitmap(&dev_priv->drm, "Capabilities2",
897af326e28SZack Rusin dev_priv->capabilities2,
898af326e28SZack Rusin cap2_names, ARRAY_SIZE(cap2_names));
8993b4c2511SNeha Bhende }
9003b4c2511SNeha Bhende
90135d86fb6SZack Rusin if (!vmwgfx_supported(dev_priv)) {
90235d86fb6SZack Rusin vmw_disable_backdoor();
90335d86fb6SZack Rusin drm_err_once(&dev_priv->drm,
90435d86fb6SZack Rusin "vmwgfx seems to be running on an unsupported hypervisor.");
90535d86fb6SZack Rusin drm_err_once(&dev_priv->drm,
90635d86fb6SZack Rusin "This configuration is likely broken.");
90735d86fb6SZack Rusin drm_err_once(&dev_priv->drm,
90835d86fb6SZack Rusin "Please switch to a supported graphics device to avoid problems.");
90935d86fb6SZack Rusin }
91035d86fb6SZack Rusin
911d92d9851SThomas Hellstrom ret = vmw_dma_select_mode(dev_priv);
912d92d9851SThomas Hellstrom if (unlikely(ret != 0)) {
9132b273544SZack Rusin drm_info(&dev_priv->drm,
9142b273544SZack Rusin "Restricting capabilities since DMA not available.\n");
915d92d9851SThomas Hellstrom refuse_dma = true;
91681a00960SThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
9172b273544SZack Rusin drm_info(&dev_priv->drm,
9182b273544SZack Rusin "Disabling 3D acceleration.\n");
919d92d9851SThomas Hellstrom }
920fb1d9738SJakob Bornecrantz
9215bb39e81SThomas Hellstrom dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
922be4f77acSZack Rusin dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
9235bb39e81SThomas Hellstrom dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
9245bb39e81SThomas Hellstrom dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
925eb4f923bSJakob Bornecrantz
926eb4f923bSJakob Bornecrantz vmw_get_initial_size(dev_priv);
927eb4f923bSJakob Bornecrantz
9280d00c488SThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_GMR2) {
929fb1d9738SJakob Bornecrantz dev_priv->max_gmr_ids =
930fb1d9738SJakob Bornecrantz vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
931fb17f189SThomas Hellstrom dev_priv->max_gmr_pages =
932fb17f189SThomas Hellstrom vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
933fb17f189SThomas Hellstrom dev_priv->memory_size =
934fb17f189SThomas Hellstrom vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
9355bb39e81SThomas Hellstrom dev_priv->memory_size -= dev_priv->vram_size;
9365bb39e81SThomas Hellstrom } else {
9375bb39e81SThomas Hellstrom /*
9385bb39e81SThomas Hellstrom * An arbitrary limit of 512MiB on surface
9395bb39e81SThomas Hellstrom * memory. But all HWV8 hardware supports GMR2.
9405bb39e81SThomas Hellstrom */
9415bb39e81SThomas Hellstrom dev_priv->memory_size = 512*1024*1024;
942fb17f189SThomas Hellstrom }
9436da768aaSThomas Hellstrom dev_priv->max_mob_pages = 0;
944857aea1cSCharmaine Lee dev_priv->max_mob_size = 0;
9456da768aaSThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
9467ebb47c9SDeepak Rawat uint64_t mem_size;
9477ebb47c9SDeepak Rawat
9487ebb47c9SDeepak Rawat if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
9497ebb47c9SDeepak Rawat mem_size = vmw_read(dev_priv,
9507ebb47c9SDeepak Rawat SVGA_REG_GBOBJECT_MEM_SIZE_KB);
9517ebb47c9SDeepak Rawat else
9527ebb47c9SDeepak Rawat mem_size =
9536da768aaSThomas Hellstrom vmw_read(dev_priv,
9546da768aaSThomas Hellstrom SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
9556da768aaSThomas Hellstrom
9566da768aaSThomas Hellstrom dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
957ebc9ac7cSZack Rusin dev_priv->max_primary_mem =
958ebc9ac7cSZack Rusin vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
959857aea1cSCharmaine Lee dev_priv->max_mob_size =
960857aea1cSCharmaine Lee vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
96135c05125SSinclair Yeh dev_priv->stdu_max_width =
96235c05125SSinclair Yeh vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
96335c05125SSinclair Yeh dev_priv->stdu_max_height =
96435c05125SSinclair Yeh vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
96535c05125SSinclair Yeh
96635c05125SSinclair Yeh vmw_write(dev_priv, SVGA_REG_DEV_CAP,
96735c05125SSinclair Yeh SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
96835c05125SSinclair Yeh dev_priv->texture_max_width = vmw_read(dev_priv,
96935c05125SSinclair Yeh SVGA_REG_DEV_CAP);
97035c05125SSinclair Yeh vmw_write(dev_priv, SVGA_REG_DEV_CAP,
97135c05125SSinclair Yeh SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
97235c05125SSinclair Yeh dev_priv->texture_max_height = vmw_read(dev_priv,
97335c05125SSinclair Yeh SVGA_REG_DEV_CAP);
974df45e9d4SThomas Hellstrom } else {
975df45e9d4SThomas Hellstrom dev_priv->texture_max_width = 8192;
976df45e9d4SThomas Hellstrom dev_priv->texture_max_height = 8192;
977ebc9ac7cSZack Rusin dev_priv->max_primary_mem = dev_priv->vram_size;
978df45e9d4SThomas Hellstrom }
979cfdc3458SZack Rusin drm_info(&dev_priv->drm,
980cfdc3458SZack Rusin "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
981cfdc3458SZack Rusin (u64)dev_priv->vram_size / 1024,
982cfdc3458SZack Rusin (u64)dev_priv->fifo_mem_size / 1024,
983cfdc3458SZack Rusin dev_priv->memory_size / 1024);
984cfdc3458SZack Rusin
985cfdc3458SZack Rusin drm_info(&dev_priv->drm,
986cfdc3458SZack Rusin "MOB limits: max mob size = %u kB, max mob pages = %u\n",
987cfdc3458SZack Rusin dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
988fb1d9738SJakob Bornecrantz
9890d00c488SThomas Hellstrom ret = vmw_dma_masks(dev_priv);
990496eb6fdSThomas Hellstrom if (unlikely(ret != 0))
9910d00c488SThomas Hellstrom goto out_err0;
9920d00c488SThomas Hellstrom
9939703bb32SZack Rusin dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
99439916897SQian Cai
9950d00c488SThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_GMR2) {
9962b273544SZack Rusin drm_info(&dev_priv->drm,
9972b273544SZack Rusin "Max GMR ids is %u\n",
998fb1d9738SJakob Bornecrantz (unsigned)dev_priv->max_gmr_ids);
9992b273544SZack Rusin drm_info(&dev_priv->drm,
10002b273544SZack Rusin "Max number of GMR pages is %u\n",
1001fb17f189SThomas Hellstrom (unsigned)dev_priv->max_gmr_pages);
1002fb17f189SThomas Hellstrom }
10032b273544SZack Rusin drm_info(&dev_priv->drm,
10042b273544SZack Rusin "Maximum display memory size is %llu kiB\n",
1005ebc9ac7cSZack Rusin (uint64_t)dev_priv->max_primary_mem / 1024);
1006fb1d9738SJakob Bornecrantz
1007d7e1958dSJakob Bornecrantz /* Need mmio memory to check for fifo pitchlock cap. */
1008d7e1958dSJakob Bornecrantz if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
1009d7e1958dSJakob Bornecrantz !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
1010d7e1958dSJakob Bornecrantz !vmw_fifo_have_pitchlock(dev_priv)) {
1011d7e1958dSJakob Bornecrantz ret = -ENOSYS;
1012d7e1958dSJakob Bornecrantz DRM_ERROR("Hardware has no pitchlock\n");
1013be4f77acSZack Rusin goto out_err0;
1014d7e1958dSJakob Bornecrantz }
1015d7e1958dSJakob Bornecrantz
1016931e09d8SMaaz Mombasawala dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops);
1017fb1d9738SJakob Bornecrantz
1018fb1d9738SJakob Bornecrantz if (unlikely(dev_priv->tdev == NULL)) {
10192b273544SZack Rusin drm_err(&dev_priv->drm,
10202b273544SZack Rusin "Unable to initialize TTM object management.\n");
1021fb1d9738SJakob Bornecrantz ret = -ENOMEM;
1022be4f77acSZack Rusin goto out_err0;
1023fb1d9738SJakob Bornecrantz }
1024fb1d9738SJakob Bornecrantz
1025506ff75cSThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
102632160e6aSZack Rusin ret = vmw_irq_install(dev_priv);
1027506ff75cSThomas Hellstrom if (ret != 0) {
10282b273544SZack Rusin drm_err(&dev_priv->drm,
10292b273544SZack Rusin "Failed installing irq: %d\n", ret);
1030506ff75cSThomas Hellstrom goto out_no_irq;
1031506ff75cSThomas Hellstrom }
1032506ff75cSThomas Hellstrom }
1033506ff75cSThomas Hellstrom
1034ae2a1040SThomas Hellstrom dev_priv->fman = vmw_fence_manager_init(dev_priv);
103514bbf20cSWei Yongjun if (unlikely(dev_priv->fman == NULL)) {
103614bbf20cSWei Yongjun ret = -ENOMEM;
1037ae2a1040SThomas Hellstrom goto out_no_fman;
103814bbf20cSWei Yongjun }
103956d1c78dSJakob Bornecrantz
10408af8a109SChristian König ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
10419703bb32SZack Rusin dev_priv->drm.dev,
10429703bb32SZack Rusin dev_priv->drm.anon_inode->i_mapping,
1043298799a2SZack Rusin dev_priv->drm.vma_offset_manager,
1044ee5d2a8eSChristian König dev_priv->map_mode == vmw_dma_alloc_coherent,
1045153b3d5bSThomas Hellstrom false);
1046153b3d5bSThomas Hellstrom if (unlikely(ret != 0)) {
10472b273544SZack Rusin drm_err(&dev_priv->drm,
10482b273544SZack Rusin "Failed initializing TTM buffer object driver.\n");
1049153b3d5bSThomas Hellstrom goto out_no_bdev;
1050153b3d5bSThomas Hellstrom }
10513458390bSThomas Hellstrom
1052153b3d5bSThomas Hellstrom /*
1053153b3d5bSThomas Hellstrom * Enable VRAM, but initially don't use it until SVGA is enabled and
1054153b3d5bSThomas Hellstrom * unhidden.
1055153b3d5bSThomas Hellstrom */
1056252f8d7bSDave Airlie
1057252f8d7bSDave Airlie ret = vmw_vram_manager_init(dev_priv);
10583458390bSThomas Hellstrom if (unlikely(ret != 0)) {
10592b273544SZack Rusin drm_err(&dev_priv->drm,
10602b273544SZack Rusin "Failed initializing memory manager for VRAM.\n");
10613458390bSThomas Hellstrom goto out_no_vram;
10623458390bSThomas Hellstrom }
10633458390bSThomas Hellstrom
1064d92223eaSZack Rusin ret = vmw_devcaps_create(dev_priv);
1065d92223eaSZack Rusin if (unlikely(ret != 0)) {
10662b273544SZack Rusin drm_err(&dev_priv->drm,
10672b273544SZack Rusin "Failed initializing device caps.\n");
1068d92223eaSZack Rusin goto out_no_vram;
1069d92223eaSZack Rusin }
1070d92223eaSZack Rusin
10713629ca5dSChristian König /*
10723629ca5dSChristian König * "Guest Memory Regions" is an aperture-like feature with
10733629ca5dSChristian König * one slot per bo. There is an upper limit on both the number
10743629ca5dSChristian König * of slots and the bo size.
10753629ca5dSChristian König */
10763458390bSThomas Hellstrom dev_priv->has_gmr = true;
10773629ca5dSChristian König /* TODO: This is most likely not correct */
10783458390bSThomas Hellstrom if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
107962161778SDave Airlie refuse_dma ||
108062161778SDave Airlie vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
10812b273544SZack Rusin drm_info(&dev_priv->drm,
10822b273544SZack Rusin "No GMR memory available. "
10833458390bSThomas Hellstrom "Graphics memory resources are very limited.\n");
10843458390bSThomas Hellstrom dev_priv->has_gmr = false;
10853458390bSThomas Hellstrom }
10863458390bSThomas Hellstrom
108781a00960SThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
10883458390bSThomas Hellstrom dev_priv->has_mob = true;
108962161778SDave Airlie
109062161778SDave Airlie if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
10912b273544SZack Rusin drm_info(&dev_priv->drm,
10922b273544SZack Rusin "No MOB memory available. "
10933458390bSThomas Hellstrom "3D will be disabled.\n");
10943458390bSThomas Hellstrom dev_priv->has_mob = false;
10953458390bSThomas Hellstrom }
1096f6be2326SZack Rusin if (vmw_sys_man_init(dev_priv) != 0) {
1097f6be2326SZack Rusin drm_info(&dev_priv->drm,
1098f6be2326SZack Rusin "No MOB page table memory available. "
1099f6be2326SZack Rusin "3D will be disabled.\n");
1100f6be2326SZack Rusin dev_priv->has_mob = false;
1101f6be2326SZack Rusin }
11023458390bSThomas Hellstrom }
11033458390bSThomas Hellstrom
1104ef7c7b74SDeepak Rawat if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
1105d92223eaSZack Rusin if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
1106878c6ecdSDeepak Rawat dev_priv->sm_type = VMW_SM_4;
1107d80efd5cSThomas Hellstrom }
110856d1c78dSJakob Bornecrantz
1109878c6ecdSDeepak Rawat /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
1110878c6ecdSDeepak Rawat if (has_sm4_context(dev_priv) &&
1111878c6ecdSDeepak Rawat (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
1112d92223eaSZack Rusin if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
1113878c6ecdSDeepak Rawat dev_priv->sm_type = VMW_SM_4_1;
11144dec2805SDeepak Rawat if (has_sm4_1_context(dev_priv) &&
11154dec2805SDeepak Rawat (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
11164fb9326bSZack Rusin if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
11174dec2805SDeepak Rawat dev_priv->sm_type = VMW_SM_5;
11184fb9326bSZack Rusin if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
11194fb9326bSZack Rusin dev_priv->sm_type = VMW_SM_5_1X;
11204fb9326bSZack Rusin }
11214dec2805SDeepak Rawat }
1122878c6ecdSDeepak Rawat }
1123878c6ecdSDeepak Rawat
11247a1c2f6cSThomas Hellstrom ret = vmw_kms_init(dev_priv);
11257a1c2f6cSThomas Hellstrom if (unlikely(ret != 0))
11267a1c2f6cSThomas Hellstrom goto out_no_kms;
1127fb1d9738SJakob Bornecrantz vmw_overlay_init(dev_priv);
112856d1c78dSJakob Bornecrantz
1129153b3d5bSThomas Hellstrom ret = vmw_request_device(dev_priv);
1130153b3d5bSThomas Hellstrom if (ret)
1131506ff75cSThomas Hellstrom goto out_no_fifo;
1132153b3d5bSThomas Hellstrom
11332b273544SZack Rusin vmw_print_sm_type(dev_priv);
1134523375c9SZack Rusin vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
1135f9217913SSinclair Yeh VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
1136523375c9SZack Rusin VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
11377f4c3377SZack Rusin vmw_write_driver_id(dev_priv);
1138f9217913SSinclair Yeh
1139d9f36a00SThomas Hellstrom dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
1140d9f36a00SThomas Hellstrom register_pm_notifier(&dev_priv->pm_nb);
1141d9f36a00SThomas Hellstrom
1142fb1d9738SJakob Bornecrantz return 0;
1143fb1d9738SJakob Bornecrantz
1144506ff75cSThomas Hellstrom out_no_fifo:
114556d1c78dSJakob Bornecrantz vmw_overlay_close(dev_priv);
114656d1c78dSJakob Bornecrantz vmw_kms_close(dev_priv);
114756d1c78dSJakob Bornecrantz out_no_kms:
1148f6be2326SZack Rusin if (dev_priv->has_mob) {
11496eee6675SDave Airlie vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
1150f6be2326SZack Rusin vmw_sys_man_fini(dev_priv);
1151f6be2326SZack Rusin }
11523458390bSThomas Hellstrom if (dev_priv->has_gmr)
11536eee6675SDave Airlie vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
1154d92223eaSZack Rusin vmw_devcaps_destroy(dev_priv);
1155e0830704SDave Airlie vmw_vram_manager_fini(dev_priv);
11563458390bSThomas Hellstrom out_no_vram:
11578af8a109SChristian König ttm_device_fini(&dev_priv->bdev);
1158153b3d5bSThomas Hellstrom out_no_bdev:
1159ae2a1040SThomas Hellstrom vmw_fence_manager_takedown(dev_priv->fman);
1160ae2a1040SThomas Hellstrom out_no_fman:
1161506ff75cSThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
11629703bb32SZack Rusin vmw_irq_uninstall(&dev_priv->drm);
1163506ff75cSThomas Hellstrom out_no_irq:
1164fb1d9738SJakob Bornecrantz ttm_object_device_release(&dev_priv->tdev);
1165fb1d9738SJakob Bornecrantz out_err0:
1166c0951b79SThomas Hellstrom for (i = vmw_res_context; i < vmw_res_max; ++i)
1167c0951b79SThomas Hellstrom idr_destroy(&dev_priv->res_idr[i]);
1168c0951b79SThomas Hellstrom
1169d80efd5cSThomas Hellstrom if (dev_priv->ctx.staged_bindings)
1170d80efd5cSThomas Hellstrom vmw_binding_state_free(dev_priv->ctx.staged_bindings);
117175ec69c7SZack Rusin out_no_pci_or_version:
117275ec69c7SZack Rusin pci_release_regions(pdev);
1173fb1d9738SJakob Bornecrantz return ret;
1174fb1d9738SJakob Bornecrantz }
1175fb1d9738SJakob Bornecrantz
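/**
 * vmw_driver_unload - Tear down driver state on device removal.
 *
 * @dev: Pointer to the drm device.
 *
 * Unwinds vmw_driver_load in reverse order: disables SVGA, closes KMS
 * and overlays, finalizes the memory managers, the fence manager, irqs
 * and the TTM object device, and finally releases the PCI regions.
 */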
117611b3c20bSGabriel Krisman Bertazi static void vmw_driver_unload(struct drm_device *dev)
1177fb1d9738SJakob Bornecrantz {
1178fb1d9738SJakob Bornecrantz struct vmw_private *dev_priv = vmw_priv(dev);
1179840462e6SThomas Zimmermann struct pci_dev *pdev = to_pci_dev(dev->dev);
1180c0951b79SThomas Hellstrom enum vmw_res_type i;
1181fb1d9738SJakob Bornecrantz
1182d9f36a00SThomas Hellstrom unregister_pm_notifier(&dev_priv->pm_nb);
1183d9f36a00SThomas Hellstrom
11849e931f2eSMaaz Mombasawala vmw_sw_context_fini(dev_priv);
1185153b3d5bSThomas Hellstrom vmw_fifo_resource_dec(dev_priv);
1186df42523cSZack Rusin
1187153b3d5bSThomas Hellstrom vmw_svga_disable(dev_priv);
1188153b3d5bSThomas Hellstrom
1189fb1d9738SJakob Bornecrantz vmw_kms_close(dev_priv);
1190fb1d9738SJakob Bornecrantz vmw_overlay_close(dev_priv);
11913458390bSThomas Hellstrom
11923458390bSThomas Hellstrom if (dev_priv->has_gmr)
11936eee6675SDave Airlie vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
11943458390bSThomas Hellstrom
1195153b3d5bSThomas Hellstrom vmw_release_device_early(dev_priv);
1196f6be2326SZack Rusin if (dev_priv->has_mob) {
11976eee6675SDave Airlie vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
1198f6be2326SZack Rusin vmw_sys_man_fini(dev_priv);
1199f6be2326SZack Rusin }
1200d92223eaSZack Rusin vmw_devcaps_destroy(dev_priv);
1201e0830704SDave Airlie vmw_vram_manager_fini(dev_priv);
12028af8a109SChristian König ttm_device_fini(&dev_priv->bdev);
1203153b3d5bSThomas Hellstrom vmw_release_device_late(dev_priv);
1204ae2a1040SThomas Hellstrom vmw_fence_manager_takedown(dev_priv->fman);
1205506ff75cSThomas Hellstrom if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
12069703bb32SZack Rusin vmw_irq_uninstall(&dev_priv->drm);
1207f2d12b8eSThomas Hellstrom
1208fb1d9738SJakob Bornecrantz ttm_object_device_release(&dev_priv->tdev);
1209c0951b79SThomas Hellstrom
1210c0951b79SThomas Hellstrom for (i = vmw_res_context; i < vmw_res_max; ++i)
1211c0951b79SThomas Hellstrom idr_destroy(&dev_priv->res_idr[i]);
1212fb1d9738SJakob Bornecrantz
12137a7a933eSMartin Krastev vmw_mksstat_remove_all(dev_priv);
12147a7a933eSMartin Krastev
121575ec69c7SZack Rusin pci_release_regions(pdev);
1216fb1d9738SJakob Bornecrantz }
1217fb1d9738SJakob Bornecrantz
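/**
 * vmw_postclose - drm file close callback.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file being closed.
 *
 * Releases the file's TTM object file and frees the vmw file private.
 */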
1218fb1d9738SJakob Bornecrantz static void vmw_postclose(struct drm_device *dev,
1219fb1d9738SJakob Bornecrantz struct drm_file *file_priv)
1220fb1d9738SJakob Bornecrantz {
12219c84aebaSThomas Hellstrom struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1222c4249855SThomas Hellstrom
1223c4249855SThomas Hellstrom ttm_object_file_release(&vmw_fp->tfile);
1224fb1d9738SJakob Bornecrantz kfree(vmw_fp);
1225fb1d9738SJakob Bornecrantz }
1226fb1d9738SJakob Bornecrantz
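/**
 * vmw_driver_open - drm file open callback.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file being opened.
 *
 * Allocates the vmw file private and initializes its TTM object file.
 * Returns 0 on success or a negative error code on failure.
 */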
1227fb1d9738SJakob Bornecrantz static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1228fb1d9738SJakob Bornecrantz {
1229fb1d9738SJakob Bornecrantz struct vmw_private *dev_priv = vmw_priv(dev);
1230fb1d9738SJakob Bornecrantz struct vmw_fpriv *vmw_fp;
1231fb1d9738SJakob Bornecrantz int ret = -ENOMEM;
1232fb1d9738SJakob Bornecrantz
1233fb1d9738SJakob Bornecrantz vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
12341a4adb05SRavikant B Sharma if (unlikely(!vmw_fp))
1235fb1d9738SJakob Bornecrantz return ret;
1236fb1d9738SJakob Bornecrantz
123776a9e07fSMaaz Mombasawala vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev);
1238fb1d9738SJakob Bornecrantz if (unlikely(vmw_fp->tfile == NULL))
1239fb1d9738SJakob Bornecrantz goto out_no_tfile;
1240fb1d9738SJakob Bornecrantz
1241fb1d9738SJakob Bornecrantz file_priv->driver_priv = vmw_fp;
1242fb1d9738SJakob Bornecrantz
1243fb1d9738SJakob Bornecrantz return 0;
1244fb1d9738SJakob Bornecrantz
1245fb1d9738SJakob Bornecrantz out_no_tfile:
1246fb1d9738SJakob Bornecrantz kfree(vmw_fp);
1247fb1d9738SJakob Bornecrantz return ret;
1248fb1d9738SJakob Bornecrantz }
1249fb1d9738SJakob Bornecrantz
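/**
 * vmw_generic_ioctl - Common ioctl dispatch for native and compat paths.
 *
 * @filp: Pointer to the file the ioctl arrived on.
 * @cmd: The ioctl command.
 * @arg: The ioctl argument.
 * @ioctl_func: The handler to forward the call to, either drm_ioctl()
 * or drm_compat_ioctl().
 *
 * Performs extra permission and command-encoding checks on the
 * driver-private ioctls before forwarding the call to @ioctl_func.
 */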
125064190bdeSThomas Hellstrom static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
125164190bdeSThomas Hellstrom unsigned long arg,
125264190bdeSThomas Hellstrom long (*ioctl_func)(struct file *, unsigned int,
125364190bdeSThomas Hellstrom unsigned long))
1254fb1d9738SJakob Bornecrantz {
1255fb1d9738SJakob Bornecrantz struct drm_file *file_priv = filp->private_data;
1256fb1d9738SJakob Bornecrantz struct drm_device *dev = file_priv->minor->dev;
1257fb1d9738SJakob Bornecrantz unsigned int nr = DRM_IOCTL_NR(cmd);
125864190bdeSThomas Hellstrom unsigned int flags;
1259fb1d9738SJakob Bornecrantz
1260fb1d9738SJakob Bornecrantz /*
1261e1f78003SThomas Hellstrom * Do extra checking on driver private ioctls.
1262fb1d9738SJakob Bornecrantz */
1263fb1d9738SJakob Bornecrantz
1264fb1d9738SJakob Bornecrantz if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
1265fb1d9738SJakob Bornecrantz && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
1266baa70943SRob Clark const struct drm_ioctl_desc *ioctl =
1267fb1d9738SJakob Bornecrantz &vmw_ioctls[nr - DRM_COMMAND_BASE];
1268fb1d9738SJakob Bornecrantz
1269d80efd5cSThomas Hellstrom if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
1270cbfbe47fSEmil Velikov return ioctl_func(filp, cmd, arg);
127131788ca8SThomas Hellstrom } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
127231788ca8SThomas Hellstrom if (!drm_is_current_master(file_priv) &&
127331788ca8SThomas Hellstrom !capable(CAP_SYS_ADMIN))
127431788ca8SThomas Hellstrom return -EACCES;
1275fb1d9738SJakob Bornecrantz }
1276d80efd5cSThomas Hellstrom
1277d80efd5cSThomas Hellstrom if (unlikely(ioctl->cmd != cmd))
1278d80efd5cSThomas Hellstrom goto out_io_encoding;
1279d80efd5cSThomas Hellstrom
128064190bdeSThomas Hellstrom flags = ioctl->flags;
128164190bdeSThomas Hellstrom } else if (!drm_ioctl_flags(nr, &flags))
128264190bdeSThomas Hellstrom return -EINVAL;
128364190bdeSThomas Hellstrom
12849c84aebaSThomas Hellstrom return ioctl_func(filp, cmd, arg);
1285d80efd5cSThomas Hellstrom
1286d80efd5cSThomas Hellstrom out_io_encoding:
1287d80efd5cSThomas Hellstrom DRM_ERROR("Invalid command format, ioctl %d\n",
1288d80efd5cSThomas Hellstrom nr - DRM_COMMAND_BASE);
1289d80efd5cSThomas Hellstrom
1290d80efd5cSThomas Hellstrom return -EINVAL;
1291fb1d9738SJakob Bornecrantz }
1292fb1d9738SJakob Bornecrantz
129364190bdeSThomas Hellstrom static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
129464190bdeSThomas Hellstrom unsigned long arg)
129564190bdeSThomas Hellstrom {
129664190bdeSThomas Hellstrom return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
129764190bdeSThomas Hellstrom }
129864190bdeSThomas Hellstrom
129964190bdeSThomas Hellstrom #ifdef CONFIG_COMPAT
130064190bdeSThomas Hellstrom static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
130164190bdeSThomas Hellstrom unsigned long arg)
130264190bdeSThomas Hellstrom {
130364190bdeSThomas Hellstrom return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
130464190bdeSThomas Hellstrom }
130564190bdeSThomas Hellstrom #endif
130664190bdeSThomas Hellstrom
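/**
 * vmw_master_set - Callback for a drm file becoming master.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file taking master status.
 * @from_open: Whether master status is taken as part of a file open.
 */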
1307907f5320SEmil Velikov static void vmw_master_set(struct drm_device *dev,
1308fb1d9738SJakob Bornecrantz struct drm_file *file_priv,
1309fb1d9738SJakob Bornecrantz bool from_open)
1310fb1d9738SJakob Bornecrantz {
131163cb4444SThomas Hellstrom /*
131263cb4444SThomas Hellstrom * Inform a new master that the layout may have changed while
131363cb4444SThomas Hellstrom * it was gone.
131463cb4444SThomas Hellstrom */
131563cb4444SThomas Hellstrom if (!from_open)
13165ea17348SThomas Hellstrom drm_sysfs_hotplug_event(dev);
1317fb1d9738SJakob Bornecrantz }
1318fb1d9738SJakob Bornecrantz
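/**
 * vmw_master_drop - Callback for a drm file dropping master status.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file dropping master status.
 *
 * Clears the legacy KMS cursor hotspot state.
 */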
1319fb1d9738SJakob Bornecrantz static void vmw_master_drop(struct drm_device *dev,
1320d6ed682eSDaniel Vetter struct drm_file *file_priv)
1321fb1d9738SJakob Bornecrantz {
1322fb1d9738SJakob Bornecrantz struct vmw_private *dev_priv = vmw_priv(dev);
1323fb1d9738SJakob Bornecrantz
13248fbf9d92SThomas Hellstrom vmw_kms_legacy_hotspot_clear(dev_priv);
1325fb1d9738SJakob Bornecrantz }
1326fb1d9738SJakob Bornecrantz
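/**
 * vmwgfx_supported - Check whether vmwgfx supports the current platform.
 *
 * @vmw: Pointer to the vmw device private.
 *
 * On x86 a VMware hypervisor is required; on arm64 only the SVGA3
 * device is supported. Other architectures are rejected with a
 * one-time warning.
 */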
132735d86fb6SZack Rusin bool vmwgfx_supported(struct vmw_private *vmw)
132835d86fb6SZack Rusin {
132935d86fb6SZack Rusin #if defined(CONFIG_X86)
133035d86fb6SZack Rusin return hypervisor_is_type(X86_HYPER_VMWARE);
133135d86fb6SZack Rusin #elif defined(CONFIG_ARM64)
133235d86fb6SZack Rusin /*
133335d86fb6SZack Rusin * On aarch64 only svga3 is supported
133435d86fb6SZack Rusin */
133535d86fb6SZack Rusin return vmw->pci_id == VMWGFX_PCI_ID_SVGA3;
133635d86fb6SZack Rusin #else
133735d86fb6SZack Rusin drm_warn_once(&vmw->drm,
133835d86fb6SZack Rusin "vmwgfx is running on an unknown architecture.");
133935d86fb6SZack Rusin return false;
134035d86fb6SZack Rusin #endif
134135d86fb6SZack Rusin }
134235d86fb6SZack Rusin
1343153b3d5bSThomas Hellstrom /**
1344153b3d5bSThomas Hellstrom * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1345153b3d5bSThomas Hellstrom *
1346153b3d5bSThomas Hellstrom * @dev_priv: Pointer to device private struct.
1347153b3d5bSThomas Hellstrom * Needs the reservation sem to be held in non-exclusive mode.
1348153b3d5bSThomas Hellstrom */
1349b9eb1a61SThomas Hellstrom static void __vmw_svga_enable(struct vmw_private *dev_priv)
1350153b3d5bSThomas Hellstrom {
13519de59bc2SDave Airlie struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
1352089cafc1SDave Airlie
13539de59bc2SDave Airlie if (!ttm_resource_manager_used(man)) {
13542cc8bfeeSZack Rusin vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
13559de59bc2SDave Airlie ttm_resource_manager_set_used(man, true);
1356153b3d5bSThomas Hellstrom }
1357153b3d5bSThomas Hellstrom }
1358153b3d5bSThomas Hellstrom
1359153b3d5bSThomas Hellstrom /**
1360153b3d5bSThomas Hellstrom * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1361153b3d5bSThomas Hellstrom *
1362153b3d5bSThomas Hellstrom * @dev_priv: Pointer to device private struct.
1363153b3d5bSThomas Hellstrom */
1364153b3d5bSThomas Hellstrom void vmw_svga_enable(struct vmw_private *dev_priv)
1365153b3d5bSThomas Hellstrom {
1366153b3d5bSThomas Hellstrom __vmw_svga_enable(dev_priv);
1367153b3d5bSThomas Hellstrom }
1368153b3d5bSThomas Hellstrom
1369153b3d5bSThomas Hellstrom /**
1370153b3d5bSThomas Hellstrom * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1371153b3d5bSThomas Hellstrom *
1372153b3d5bSThomas Hellstrom * @dev_priv: Pointer to device private struct.
1373153b3d5bSThomas Hellstrom * Needs the reservation sem to be held in exclusive mode.
1374153b3d5bSThomas Hellstrom * Will not empty VRAM. VRAM must be emptied by caller.
1375153b3d5bSThomas Hellstrom */
1376b9eb1a61SThomas Hellstrom static void __vmw_svga_disable(struct vmw_private *dev_priv)
1377153b3d5bSThomas Hellstrom {
13789de59bc2SDave Airlie struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
1379089cafc1SDave Airlie
13809de59bc2SDave Airlie if (ttm_resource_manager_used(man)) {
13819de59bc2SDave Airlie ttm_resource_manager_set_used(man, false);
1382153b3d5bSThomas Hellstrom vmw_write(dev_priv, SVGA_REG_ENABLE,
13838ce75f8aSSinclair Yeh SVGA_REG_ENABLE_HIDE |
13848ce75f8aSSinclair Yeh SVGA_REG_ENABLE_ENABLE);
1385153b3d5bSThomas Hellstrom }
1386153b3d5bSThomas Hellstrom }
1387153b3d5bSThomas Hellstrom
1388153b3d5bSThomas Hellstrom /**
1389153b3d5bSThomas Hellstrom * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
1390153b3d5bSThomas Hellstrom * running.
1391153b3d5bSThomas Hellstrom *
1392153b3d5bSThomas Hellstrom * @dev_priv: Pointer to device private struct.
1393153b3d5bSThomas Hellstrom * Will empty VRAM.
1394153b3d5bSThomas Hellstrom */
1395153b3d5bSThomas Hellstrom void vmw_svga_disable(struct vmw_private *dev_priv)
1396153b3d5bSThomas Hellstrom {
13979de59bc2SDave Airlie struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
1398140bcaa2SThomas Hellstrom /*
1399140bcaa2SThomas Hellstrom * Disabling SVGA will turn off device modesetting capabilities, so
1400140bcaa2SThomas Hellstrom * notify KMS about that so that it doesn't cache atomic state that
1401140bcaa2SThomas Hellstrom * isn't valid anymore, for example crtcs turned on.
1402140bcaa2SThomas Hellstrom * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
1403140bcaa2SThomas Hellstrom * but vmw_kms_lost_device() takes the reservation sem and thus we'll
1404140bcaa2SThomas Hellstrom * end up with lock order reversal. Thus, a master may actually perform
1405140bcaa2SThomas Hellstrom * a new modeset just after we call vmw_kms_lost_device() and race with
1406140bcaa2SThomas Hellstrom * vmw_svga_disable(), but that should at worst cause atomic KMS state
1407140bcaa2SThomas Hellstrom * to be inconsistent with the device, causing modesetting problems.
1408140bcaa2SThomas Hellstrom *
1409140bcaa2SThomas Hellstrom */
14109703bb32SZack Rusin vmw_kms_lost_device(&dev_priv->drm);
14119de59bc2SDave Airlie if (ttm_resource_manager_used(man)) {
14124ce032d6SChristian König if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
1413153b3d5bSThomas Hellstrom DRM_ERROR("Failed evicting VRAM buffers.\n");
1414ade94143SDaniel Vetter ttm_resource_manager_set_used(man, false);
14158ce75f8aSSinclair Yeh vmw_write(dev_priv, SVGA_REG_ENABLE,
14168ce75f8aSSinclair Yeh SVGA_REG_ENABLE_HIDE |
14178ce75f8aSSinclair Yeh SVGA_REG_ENABLE_ENABLE);
1418ff36baf8SDaniel Vetter }
1419153b3d5bSThomas Hellstrom }
1420fb1d9738SJakob Bornecrantz
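/**
 * vmw_remove - PCI remove callback.
 *
 * @pdev: The PCI device being removed.
 *
 * Unregisters the drm device and unloads the driver.
 */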
1421fb1d9738SJakob Bornecrantz static void vmw_remove(struct pci_dev *pdev)
1422fb1d9738SJakob Bornecrantz {
1423fb1d9738SJakob Bornecrantz struct drm_device *dev = pci_get_drvdata(pdev);
1424fb1d9738SJakob Bornecrantz
142536891da8SThomas Zimmermann drm_dev_unregister(dev);
142636891da8SThomas Zimmermann vmw_driver_unload(dev);
1427fb1d9738SJakob Bornecrantz }
1428fb1d9738SJakob Bornecrantz
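/**
 * vmw_debugfs_resource_managers_init - Expose the TTM resource managers
 * in debugfs.
 *
 * @vmw: Pointer to the vmw device private.
 *
 * Always exposes the system and VRAM managers; the GMR and MOB
 * managers are only exposed when present.
 */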
1429af4a25bbSZack Rusin static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
1430af4a25bbSZack Rusin {
1431af4a25bbSZack Rusin struct drm_minor *minor = vmw->drm.primary;
1432af4a25bbSZack Rusin struct dentry *root = minor->debugfs_root;
1433af4a25bbSZack Rusin
1434af4a25bbSZack Rusin ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM),
1435af4a25bbSZack Rusin root, "system_ttm");
1436af4a25bbSZack Rusin ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
1437af4a25bbSZack Rusin root, "vram_ttm");
1438042ef0afSJocelyn Falempe if (vmw->has_gmr)
1439af4a25bbSZack Rusin ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
1440af4a25bbSZack Rusin root, "gmr_ttm");
1441042ef0afSJocelyn Falempe if (vmw->has_mob) {
1442af4a25bbSZack Rusin ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
1443af4a25bbSZack Rusin root, "mob_ttm");
1444af4a25bbSZack Rusin ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
1445af4a25bbSZack Rusin root, "system_mob_ttm");
1446af4a25bbSZack Rusin }
1447042ef0afSJocelyn Falempe }
1448af4a25bbSZack Rusin
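/**
 * vmwgfx_pm_notifier - Power management notifier callback.
 *
 * @nb: The notifier block embedded in struct vmw_private.
 * @val: The power management event.
 * @ptr: Unused.
 *
 * Marks the device as suspend-locked while hibernation is being
 * prepared and clears the flag again after hibernation or restore.
 */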
1449d9f36a00SThomas Hellstrom static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1450d9f36a00SThomas Hellstrom void *ptr)
1451d9f36a00SThomas Hellstrom {
1452d9f36a00SThomas Hellstrom struct vmw_private *dev_priv =
1453d9f36a00SThomas Hellstrom container_of(nb, struct vmw_private, pm_nb);
1454d9f36a00SThomas Hellstrom
1455d9f36a00SThomas Hellstrom switch (val) {
1456d9f36a00SThomas Hellstrom case PM_HIBERNATION_PREPARE:
1457153b3d5bSThomas Hellstrom /*
1458c3b9b165SThomas Hellstrom * Take the reservation sem in write mode, which will make sure
1459c3b9b165SThomas Hellstrom * there are no other processes holding a buffer object
1460c3b9b165SThomas Hellstrom * reservation, meaning we should be able to evict all buffer
1461c3b9b165SThomas Hellstrom * objects if needed.
1462c3b9b165SThomas Hellstrom * Once user-space processes have been frozen, we can release
1463c3b9b165SThomas Hellstrom * the lock again.
1464d9f36a00SThomas Hellstrom */
1465c3b9b165SThomas Hellstrom dev_priv->suspend_locked = true;
1466d9f36a00SThomas Hellstrom break;
1467d9f36a00SThomas Hellstrom case PM_POST_HIBERNATION:
1468094e0fa8SThomas Hellstrom case PM_POST_RESTORE:
1469c3b9b165SThomas Hellstrom if (READ_ONCE(dev_priv->suspend_locked)) {
1470c3b9b165SThomas Hellstrom dev_priv->suspend_locked = false;
1471c3b9b165SThomas Hellstrom }
1472d9f36a00SThomas Hellstrom break;
1473d9f36a00SThomas Hellstrom default:
1474d9f36a00SThomas Hellstrom break;
1475d9f36a00SThomas Hellstrom }
1476d9f36a00SThomas Hellstrom return 0;
1477d9f36a00SThomas Hellstrom }
1478d9f36a00SThomas Hellstrom
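/**
 * vmw_pci_suspend - PCI suspend callback.
 *
 * @pdev: The PCI device.
 * @state: The requested power transition.
 *
 * Returns -EBUSY if hibernation has been refused; otherwise saves PCI
 * state and puts the device into the D3hot power state.
 */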
14797fbd721aSThomas Hellstrom static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1480d9f36a00SThomas Hellstrom {
1481094e0fa8SThomas Hellstrom struct drm_device *dev = pci_get_drvdata(pdev);
1482094e0fa8SThomas Hellstrom struct vmw_private *dev_priv = vmw_priv(dev);
1483094e0fa8SThomas Hellstrom
1484153b3d5bSThomas Hellstrom if (dev_priv->refuse_hibernation)
1485094e0fa8SThomas Hellstrom return -EBUSY;
1486094e0fa8SThomas Hellstrom
1487d9f36a00SThomas Hellstrom pci_save_state(pdev);
1488d9f36a00SThomas Hellstrom pci_disable_device(pdev);
1489d9f36a00SThomas Hellstrom pci_set_power_state(pdev, PCI_D3hot);
1490d9f36a00SThomas Hellstrom return 0;
1491d9f36a00SThomas Hellstrom }
1492d9f36a00SThomas Hellstrom
14937fbd721aSThomas Hellstrom static int vmw_pci_resume(struct pci_dev *pdev)
1494d9f36a00SThomas Hellstrom {
1495d9f36a00SThomas Hellstrom pci_set_power_state(pdev, PCI_D0);
1496d9f36a00SThomas Hellstrom pci_restore_state(pdev);
1497d9f36a00SThomas Hellstrom return pci_enable_device(pdev);
1498d9f36a00SThomas Hellstrom }
1499d9f36a00SThomas Hellstrom
15007fbd721aSThomas Hellstrom static int vmw_pm_suspend(struct device *kdev)
15017fbd721aSThomas Hellstrom {
15027fbd721aSThomas Hellstrom struct pci_dev *pdev = to_pci_dev(kdev);
15037fbd721aSThomas Hellstrom struct pm_message dummy;
15047fbd721aSThomas Hellstrom
15057fbd721aSThomas Hellstrom dummy.event = 0;
15067fbd721aSThomas Hellstrom
15077fbd721aSThomas Hellstrom return vmw_pci_suspend(pdev, dummy);
15087fbd721aSThomas Hellstrom }
15097fbd721aSThomas Hellstrom
15107fbd721aSThomas Hellstrom static int vmw_pm_resume(struct device *kdev)
15117fbd721aSThomas Hellstrom {
15127fbd721aSThomas Hellstrom struct pci_dev *pdev = to_pci_dev(kdev);
15137fbd721aSThomas Hellstrom
15147fbd721aSThomas Hellstrom return vmw_pci_resume(pdev);
15157fbd721aSThomas Hellstrom }
15167fbd721aSThomas Hellstrom
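/**
 * vmw_pm_freeze - Driver hibernation freeze callback.
 *
 * @kdev: The struct device to freeze.
 *
 * Suspends KMS, evicts resources and swaps out buffer objects. Fails
 * with -EBUSY and brings the device back up if 3D resources are still
 * active; otherwise disables SVGA and releases the device.
 */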
1517153b3d5bSThomas Hellstrom static int vmw_pm_freeze(struct device *kdev)
15187fbd721aSThomas Hellstrom {
15197fbd721aSThomas Hellstrom struct pci_dev *pdev = to_pci_dev(kdev);
15207fbd721aSThomas Hellstrom struct drm_device *dev = pci_get_drvdata(pdev);
15217fbd721aSThomas Hellstrom struct vmw_private *dev_priv = vmw_priv(dev);
1522d7c59750SChristian König struct ttm_operation_ctx ctx = {
1523d7c59750SChristian König .interruptible = false,
1524d7c59750SChristian König .no_wait_gpu = false
1525d7c59750SChristian König };
1526c3b9b165SThomas Hellstrom int ret;
15277fbd721aSThomas Hellstrom
1528c3b9b165SThomas Hellstrom /*
1529c3b9b165SThomas Hellstrom * No user-space processes should be running now.
1530c3b9b165SThomas Hellstrom */
15319703bb32SZack Rusin ret = vmw_kms_suspend(&dev_priv->drm);
1532c3b9b165SThomas Hellstrom if (ret) {
1533c3b9b165SThomas Hellstrom DRM_ERROR("Failed to freeze modesetting.\n");
1534c3b9b165SThomas Hellstrom return ret;
1535c3b9b165SThomas Hellstrom }
15367fbd721aSThomas Hellstrom
1537c3b9b165SThomas Hellstrom vmw_execbuf_release_pinned_bo(dev_priv);
1538c3b9b165SThomas Hellstrom vmw_resource_evict_all(dev_priv);
1539c3b9b165SThomas Hellstrom vmw_release_device_early(dev_priv);
1540f9e2a03eSChristian König while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
1541c3b9b165SThomas Hellstrom vmw_fifo_resource_dec(dev_priv);
1542153b3d5bSThomas Hellstrom if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
1543153b3d5bSThomas Hellstrom DRM_ERROR("Can't hibernate while 3D resources are active.\n");
1544153b3d5bSThomas Hellstrom vmw_fifo_resource_inc(dev_priv);
1545153b3d5bSThomas Hellstrom WARN_ON(vmw_request_device_late(dev_priv));
1546c3b9b165SThomas Hellstrom dev_priv->suspend_locked = false;
1547c3b9b165SThomas Hellstrom if (dev_priv->suspend_state)
1548c3b9b165SThomas Hellstrom vmw_kms_resume(dev);
15497fbd721aSThomas Hellstrom return -EBUSY;
15507fbd721aSThomas Hellstrom }
15517fbd721aSThomas Hellstrom
1552c3b9b165SThomas Hellstrom vmw_fence_fifo_down(dev_priv->fman);
1553153b3d5bSThomas Hellstrom __vmw_svga_disable(dev_priv);
1554153b3d5bSThomas Hellstrom
1555153b3d5bSThomas Hellstrom vmw_release_device_late(dev_priv);
15567fbd721aSThomas Hellstrom return 0;
15577fbd721aSThomas Hellstrom }
15587fbd721aSThomas Hellstrom
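/**
 * vmw_pm_restore - Driver hibernation thaw / restore callback.
 *
 * @kdev: The struct device to restore.
 *
 * Re-detects the device version, brings the device back up, re-enables
 * SVGA and resumes KMS from the saved suspend state, if any.
 */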
1559153b3d5bSThomas Hellstrom static int vmw_pm_restore(struct device *kdev)
15607fbd721aSThomas Hellstrom {
15617fbd721aSThomas Hellstrom struct pci_dev *pdev = to_pci_dev(kdev);
15627fbd721aSThomas Hellstrom struct drm_device *dev = pci_get_drvdata(pdev);
15637fbd721aSThomas Hellstrom struct vmw_private *dev_priv = vmw_priv(dev);
1564153b3d5bSThomas Hellstrom int ret;
15657fbd721aSThomas Hellstrom
15662cd80dbdSZack Rusin vmw_detect_version(dev_priv);
156795e8f6a2SThomas Hellstrom
1568153b3d5bSThomas Hellstrom vmw_fifo_resource_inc(dev_priv);
1569153b3d5bSThomas Hellstrom
1570153b3d5bSThomas Hellstrom ret = vmw_request_device(dev_priv);
1571153b3d5bSThomas Hellstrom if (ret)
1572153b3d5bSThomas Hellstrom return ret;
1573153b3d5bSThomas Hellstrom
1574153b3d5bSThomas Hellstrom __vmw_svga_enable(dev_priv);
15757fbd721aSThomas Hellstrom
1576c3b9b165SThomas Hellstrom vmw_fence_fifo_up(dev_priv->fman);
1577c3b9b165SThomas Hellstrom dev_priv->suspend_locked = false;
1578c3b9b165SThomas Hellstrom if (dev_priv->suspend_state)
15799703bb32SZack Rusin vmw_kms_resume(&dev_priv->drm);
1580c3b9b165SThomas Hellstrom
1581153b3d5bSThomas Hellstrom return 0;
15827fbd721aSThomas Hellstrom }
15837fbd721aSThomas Hellstrom
15847fbd721aSThomas Hellstrom static const struct dev_pm_ops vmw_pm_ops = {
1585153b3d5bSThomas Hellstrom .freeze = vmw_pm_freeze,
1586153b3d5bSThomas Hellstrom .thaw = vmw_pm_restore,
1587153b3d5bSThomas Hellstrom .restore = vmw_pm_restore,
15887fbd721aSThomas Hellstrom .suspend = vmw_pm_suspend,
15897fbd721aSThomas Hellstrom .resume = vmw_pm_resume,
15907fbd721aSThomas Hellstrom };
15917fbd721aSThomas Hellstrom
1592e08e96deSArjan van de Ven static const struct file_operations vmwgfx_driver_fops = {
1593e08e96deSArjan van de Ven .owner = THIS_MODULE,
1594e08e96deSArjan van de Ven .open = drm_open,
1595e08e96deSArjan van de Ven .release = drm_release,
1596e08e96deSArjan van de Ven .unlocked_ioctl = vmw_unlocked_ioctl,
15979da2957fSZack Rusin .mmap = drm_gem_mmap,
15982cd80dbdSZack Rusin .poll = drm_poll,
15992cd80dbdSZack Rusin .read = drm_read,
1600e08e96deSArjan van de Ven #if defined(CONFIG_COMPAT)
160164190bdeSThomas Hellstrom .compat_ioctl = vmw_compat_ioctl,
1602e08e96deSArjan van de Ven #endif
1603e08e96deSArjan van de Ven .llseek = noop_llseek,
1604e08e96deSArjan van de Ven };
1605e08e96deSArjan van de Ven
160670a59dd8SDaniel Vetter static const struct drm_driver driver = {
16071ff49481SDaniel Vetter .driver_features =
160887b3b45cSZack Rusin DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
1609fb1d9738SJakob Bornecrantz .ioctls = vmw_ioctls,
1610f95aeb17SDamien Lespiau .num_ioctls = ARRAY_SIZE(vmw_ioctls),
1611fb1d9738SJakob Bornecrantz .master_set = vmw_master_set,
1612fb1d9738SJakob Bornecrantz .master_drop = vmw_master_drop,
1613fb1d9738SJakob Bornecrantz .open = vmw_driver_open,
1614fb1d9738SJakob Bornecrantz .postclose = vmw_postclose,
16155e1782d2SDave Airlie
16165e1782d2SDave Airlie .dumb_create = vmw_dumb_create,
16178afa13a0SZack Rusin .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
16185e1782d2SDave Airlie
161969977ff5SThomas Hellstrom .prime_fd_to_handle = vmw_prime_fd_to_handle,
162069977ff5SThomas Hellstrom .prime_handle_to_fd = vmw_prime_handle_to_fd,
162165674218SZack Rusin .gem_prime_import_sg_table = vmw_prime_import_sg_table,
162269977ff5SThomas Hellstrom
1623e08e96deSArjan van de Ven .fops = &vmwgfx_driver_fops,
16248410ea3bSDave Airlie .name = VMWGFX_DRIVER_NAME,
16258410ea3bSDave Airlie .desc = VMWGFX_DRIVER_DESC,
16268410ea3bSDave Airlie .date = VMWGFX_DRIVER_DATE,
16278410ea3bSDave Airlie .major = VMWGFX_DRIVER_MAJOR,
16288410ea3bSDave Airlie .minor = VMWGFX_DRIVER_MINOR,
16298410ea3bSDave Airlie .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
16308410ea3bSDave Airlie };
16318410ea3bSDave Airlie
16328410ea3bSDave Airlie static struct pci_driver vmw_pci_driver = {
1633fb1d9738SJakob Bornecrantz .name = VMWGFX_DRIVER_NAME,
1634fb1d9738SJakob Bornecrantz .id_table = vmw_pci_id_list,
1635fb1d9738SJakob Bornecrantz .probe = vmw_probe,
1636d9f36a00SThomas Hellstrom .remove = vmw_remove,
16377fbd721aSThomas Hellstrom .driver = {
16387fbd721aSThomas Hellstrom .pm = &vmw_pm_ops
16397fbd721aSThomas Hellstrom }
1640fb1d9738SJakob Bornecrantz };
1641fb1d9738SJakob Bornecrantz
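/**
 * vmw_probe - PCI probe callback.
 *
 * @pdev: The probed PCI device.
 * @ent: The matching entry in vmw_pci_id_list.
 *
 * Removes conflicting PCI framebuffers, enables the device, allocates
 * the vmw device private, loads the driver, registers the drm device
 * and sets up fbdev emulation and debugfs entries.
 */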
1642fb1d9738SJakob Bornecrantz static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1643fb1d9738SJakob Bornecrantz {
16449703bb32SZack Rusin struct vmw_private *vmw;
164536891da8SThomas Zimmermann int ret;
164636891da8SThomas Zimmermann
164797c9bfe3SThomas Zimmermann ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
164831856c8cSZack Rusin if (ret)
164928b5f3b6SZack Rusin goto out_error;
165031856c8cSZack Rusin
16519703bb32SZack Rusin ret = pcim_enable_device(pdev);
165236891da8SThomas Zimmermann if (ret)
165328b5f3b6SZack Rusin goto out_error;
165436891da8SThomas Zimmermann
16559703bb32SZack Rusin vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
16569703bb32SZack Rusin struct vmw_private, drm);
165728b5f3b6SZack Rusin if (IS_ERR(vmw)) {
165828b5f3b6SZack Rusin ret = PTR_ERR(vmw);
165928b5f3b6SZack Rusin goto out_error;
166028b5f3b6SZack Rusin }
16619703bb32SZack Rusin
16629703bb32SZack Rusin pci_set_drvdata(pdev, &vmw->drm);
16639703bb32SZack Rusin
16649703bb32SZack Rusin ret = vmw_driver_load(vmw, ent->device);
16659703bb32SZack Rusin if (ret)
16668aadeb8aSZack Rusin goto out_error;
16679703bb32SZack Rusin
16689703bb32SZack Rusin ret = drm_dev_register(&vmw->drm, 0);
166928b5f3b6SZack Rusin if (ret)
167028b5f3b6SZack Rusin goto out_unload;
167136891da8SThomas Zimmermann
1672df42523cSZack Rusin vmw_fifo_resource_inc(vmw);
1673df42523cSZack Rusin vmw_svga_enable(vmw);
1674df42523cSZack Rusin drm_fbdev_generic_setup(&vmw->drm, 0);
1675df42523cSZack Rusin
16768afa13a0SZack Rusin vmw_debugfs_gem_init(vmw);
1677af4a25bbSZack Rusin vmw_debugfs_resource_managers_init(vmw);
16788afa13a0SZack Rusin
167936891da8SThomas Zimmermann return 0;
168028b5f3b6SZack Rusin out_unload:
168128b5f3b6SZack Rusin vmw_driver_unload(&vmw->drm);
168228b5f3b6SZack Rusin out_error:
168328b5f3b6SZack Rusin return ret;
1684fb1d9738SJakob Bornecrantz }
1685fb1d9738SJakob Bornecrantz
1686df8d1d0aSThomas Zimmermann drm_module_pci_driver(vmw_pci_driver);
1687fb1d9738SJakob Bornecrantz
1688fb1d9738SJakob Bornecrantz MODULE_AUTHOR("VMware Inc. and others");
1689fb1d9738SJakob Bornecrantz MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
1690fb1d9738SJakob Bornecrantz MODULE_LICENSE("GPL and additional rights");
169173558eadSThomas Hellstrom MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
169273558eadSThomas Hellstrom __stringify(VMWGFX_DRIVER_MINOR) "."
169373558eadSThomas Hellstrom __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
169473558eadSThomas Hellstrom "0");
1695