1dff96888SDirk Hohndel (VMware) // SPDX-License-Identifier: GPL-2.0 OR MIT
2543831cfSThomas Hellstrom /**************************************************************************
3543831cfSThomas Hellstrom  *
409881d29SZack Rusin  * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
5543831cfSThomas Hellstrom  *
6543831cfSThomas Hellstrom  * Permission is hereby granted, free of charge, to any person obtaining a
7543831cfSThomas Hellstrom  * copy of this software and associated documentation files (the
8543831cfSThomas Hellstrom  * "Software"), to deal in the Software without restriction, including
9543831cfSThomas Hellstrom  * without limitation the rights to use, copy, modify, merge, publish,
10543831cfSThomas Hellstrom  * distribute, sub license, and/or sell copies of the Software, and to
11543831cfSThomas Hellstrom  * permit persons to whom the Software is furnished to do so, subject to
12543831cfSThomas Hellstrom  * the following conditions:
13543831cfSThomas Hellstrom  *
14543831cfSThomas Hellstrom  * The above copyright notice and this permission notice (including the
15543831cfSThomas Hellstrom  * next paragraph) shall be included in all copies or substantial portions
16543831cfSThomas Hellstrom  * of the Software.
17543831cfSThomas Hellstrom  *
18543831cfSThomas Hellstrom  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19543831cfSThomas Hellstrom  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20543831cfSThomas Hellstrom  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21543831cfSThomas Hellstrom  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22543831cfSThomas Hellstrom  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23543831cfSThomas Hellstrom  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24543831cfSThomas Hellstrom  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25543831cfSThomas Hellstrom  *
26543831cfSThomas Hellstrom  **************************************************************************/
27543831cfSThomas Hellstrom 
28008be682SMasahiro Yamada #include <drm/ttm/ttm_placement.h>
29008be682SMasahiro Yamada 
3009881d29SZack Rusin #include "vmwgfx_binding.h"
3109881d29SZack Rusin #include "vmwgfx_bo.h"
32543831cfSThomas Hellstrom #include "vmwgfx_drv.h"
33543831cfSThomas Hellstrom #include "vmwgfx_resource_priv.h"
34543831cfSThomas Hellstrom 
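/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: TTM base object used to look the context up from a user-space
 * handle.
 * @res: The context resource itself.
 * @cbs: Context binding state tracked for this context.
 * @man: Command buffer managed resource manager for this context.
 * @cotables: Context object tables, allocated for DX contexts only.
 * @cotable_lock: Protects updates of the @cotables array.
 * @dx_query_mob: Buffer object holding DX query results bound to this
 * context, if any.
 */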
35543831cfSThomas Hellstrom struct vmw_user_context {
36543831cfSThomas Hellstrom 	struct ttm_base_object base;
37543831cfSThomas Hellstrom 	struct vmw_resource res;
38d80efd5cSThomas Hellstrom 	struct vmw_ctx_binding_state *cbs;
3918e4a466SThomas Hellstrom 	struct vmw_cmdbuf_res_manager *man;
405e8ec0d9SDeepak Rawat 	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
41d80efd5cSThomas Hellstrom 	spinlock_t cotable_lock;
4209881d29SZack Rusin 	struct vmw_bo *dx_query_mob;
43543831cfSThomas Hellstrom };
44543831cfSThomas Hellstrom 
45543831cfSThomas Hellstrom static void vmw_user_context_free(struct vmw_resource *res);
46543831cfSThomas Hellstrom static struct vmw_resource *
47543831cfSThomas Hellstrom vmw_user_context_base_to_res(struct ttm_base_object *base);
48543831cfSThomas Hellstrom 
4958a0c5f0SThomas Hellstrom static int vmw_gb_context_create(struct vmw_resource *res);
5058a0c5f0SThomas Hellstrom static int vmw_gb_context_bind(struct vmw_resource *res,
5158a0c5f0SThomas Hellstrom 			       struct ttm_validate_buffer *val_buf);
5258a0c5f0SThomas Hellstrom static int vmw_gb_context_unbind(struct vmw_resource *res,
5358a0c5f0SThomas Hellstrom 				 bool readback,
5458a0c5f0SThomas Hellstrom 				 struct ttm_validate_buffer *val_buf);
5558a0c5f0SThomas Hellstrom static int vmw_gb_context_destroy(struct vmw_resource *res);
56d80efd5cSThomas Hellstrom static int vmw_dx_context_create(struct vmw_resource *res);
57d80efd5cSThomas Hellstrom static int vmw_dx_context_bind(struct vmw_resource *res,
58d80efd5cSThomas Hellstrom 			       struct ttm_validate_buffer *val_buf);
59d80efd5cSThomas Hellstrom static int vmw_dx_context_unbind(struct vmw_resource *res,
60d80efd5cSThomas Hellstrom 				 bool readback,
61d80efd5cSThomas Hellstrom 				 struct ttm_validate_buffer *val_buf);
62d80efd5cSThomas Hellstrom static int vmw_dx_context_destroy(struct vmw_resource *res);
63d80efd5cSThomas Hellstrom 
64543831cfSThomas Hellstrom static const struct vmw_user_resource_conv user_context_conv = {
65543831cfSThomas Hellstrom 	.object_type = VMW_RES_CONTEXT,
66543831cfSThomas Hellstrom 	.base_obj_to_res = vmw_user_context_base_to_res,
67543831cfSThomas Hellstrom 	.res_free = vmw_user_context_free
68543831cfSThomas Hellstrom };
69543831cfSThomas Hellstrom 
70543831cfSThomas Hellstrom const struct vmw_user_resource_conv *user_context_converter =
71543831cfSThomas Hellstrom 	&user_context_conv;
72543831cfSThomas Hellstrom 
73543831cfSThomas Hellstrom 
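/*
 * Legacy (non guest-backed) contexts are created and destroyed entirely
 * through FIFO commands, so this res_func needs no create/destroy/bind/
 * unbind callbacks and no guest memory backing.
 */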
74543831cfSThomas Hellstrom static const struct vmw_res_func vmw_legacy_context_func = {
75543831cfSThomas Hellstrom 	.res_type = vmw_res_context,
76*668b2066SZack Rusin 	.needs_guest_memory = false,
77543831cfSThomas Hellstrom 	.may_evict = false,
78543831cfSThomas Hellstrom 	.type_name = "legacy contexts",
7939985eeaSZack Rusin 	.domain = VMW_BO_DOMAIN_SYS,
8039985eeaSZack Rusin 	.busy_domain = VMW_BO_DOMAIN_SYS,
81543831cfSThomas Hellstrom 	.create = NULL,
82543831cfSThomas Hellstrom 	.destroy = NULL,
83543831cfSThomas Hellstrom 	.bind = NULL,
84543831cfSThomas Hellstrom 	.unbind = NULL
85543831cfSThomas Hellstrom };
86543831cfSThomas Hellstrom 
8758a0c5f0SThomas Hellstrom static const struct vmw_res_func vmw_gb_context_func = {
8858a0c5f0SThomas Hellstrom 	.res_type = vmw_res_context,
89*668b2066SZack Rusin 	.needs_guest_memory = true,
9058a0c5f0SThomas Hellstrom 	.may_evict = true,
91a0a63940SThomas Hellstrom 	.prio = 3,
92a0a63940SThomas Hellstrom 	.dirty_prio = 3,
9358a0c5f0SThomas Hellstrom 	.type_name = "guest backed contexts",
9439985eeaSZack Rusin 	.domain = VMW_BO_DOMAIN_MOB,
9539985eeaSZack Rusin 	.busy_domain = VMW_BO_DOMAIN_MOB,
9658a0c5f0SThomas Hellstrom 	.create = vmw_gb_context_create,
9758a0c5f0SThomas Hellstrom 	.destroy = vmw_gb_context_destroy,
9858a0c5f0SThomas Hellstrom 	.bind = vmw_gb_context_bind,
9958a0c5f0SThomas Hellstrom 	.unbind = vmw_gb_context_unbind
10058a0c5f0SThomas Hellstrom };
10158a0c5f0SThomas Hellstrom 
102d80efd5cSThomas Hellstrom static const struct vmw_res_func vmw_dx_context_func = {
103d80efd5cSThomas Hellstrom 	.res_type = vmw_res_dx_context,
104*668b2066SZack Rusin 	.needs_guest_memory = true,
105d80efd5cSThomas Hellstrom 	.may_evict = true,
106a0a63940SThomas Hellstrom 	.prio = 3,
107a0a63940SThomas Hellstrom 	.dirty_prio = 3,
108d80efd5cSThomas Hellstrom 	.type_name = "dx contexts",
10939985eeaSZack Rusin 	.domain = VMW_BO_DOMAIN_MOB,
11039985eeaSZack Rusin 	.busy_domain = VMW_BO_DOMAIN_MOB,
111d80efd5cSThomas Hellstrom 	.create = vmw_dx_context_create,
112d80efd5cSThomas Hellstrom 	.destroy = vmw_dx_context_destroy,
113d80efd5cSThomas Hellstrom 	.bind = vmw_dx_context_bind,
114d80efd5cSThomas Hellstrom 	.unbind = vmw_dx_context_unbind
115d80efd5cSThomas Hellstrom };
116b5c3b1a6SThomas Hellstrom 
117ea716197SLee Jones /*
118543831cfSThomas Hellstrom  * Context management:
119543831cfSThomas Hellstrom  */
120543831cfSThomas Hellstrom 
1215e8ec0d9SDeepak Rawat static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
1225e8ec0d9SDeepak Rawat 				       struct vmw_user_context *uctx)
123d80efd5cSThomas Hellstrom {
124d80efd5cSThomas Hellstrom 	struct vmw_resource *res;
125d80efd5cSThomas Hellstrom 	int i;
1265e8ec0d9SDeepak Rawat 	u32 cotable_max = has_sm5_context(dev_priv) ?
1275e8ec0d9SDeepak Rawat 		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
128d80efd5cSThomas Hellstrom 
1295e8ec0d9SDeepak Rawat 	for (i = 0; i < cotable_max; ++i) {
130d80efd5cSThomas Hellstrom 		spin_lock(&uctx->cotable_lock);
131d80efd5cSThomas Hellstrom 		res = uctx->cotables[i];
132d80efd5cSThomas Hellstrom 		uctx->cotables[i] = NULL;
133d80efd5cSThomas Hellstrom 		spin_unlock(&uctx->cotable_lock);
134fd11a3c0SSinclair Yeh 
135fd11a3c0SSinclair Yeh 		if (res)
136d80efd5cSThomas Hellstrom 			vmw_resource_unreference(&res);
137d80efd5cSThomas Hellstrom 	}
138d80efd5cSThomas Hellstrom }
139d80efd5cSThomas Hellstrom 
140543831cfSThomas Hellstrom static void vmw_hw_context_destroy(struct vmw_resource *res)
141543831cfSThomas Hellstrom {
14218e4a466SThomas Hellstrom 	struct vmw_user_context *uctx =
14318e4a466SThomas Hellstrom 		container_of(res, struct vmw_user_context, res);
144543831cfSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
145543831cfSThomas Hellstrom 	struct {
146543831cfSThomas Hellstrom 		SVGA3dCmdHeader header;
147543831cfSThomas Hellstrom 		SVGA3dCmdDestroyContext body;
148543831cfSThomas Hellstrom 	} *cmd;
149543831cfSThomas Hellstrom 
150543831cfSThomas Hellstrom 
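	/*
	 * Guest-backed and DX contexts are destroyed through their
	 * res_func destroy callback; their bindings are killed first,
	 * and the pinned query bo is released if this context held the
	 * current query state.
	 */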
151d80efd5cSThomas Hellstrom 	if (res->func->destroy == vmw_gb_context_destroy ||
152d80efd5cSThomas Hellstrom 	    res->func->destroy == vmw_dx_context_destroy) {
15358a0c5f0SThomas Hellstrom 		mutex_lock(&dev_priv->cmdbuf_mutex);
15418e4a466SThomas Hellstrom 		vmw_cmdbuf_res_man_destroy(uctx->man);
15530f82d81SThomas Hellstrom 		mutex_lock(&dev_priv->binding_mutex);
156d80efd5cSThomas Hellstrom 		vmw_binding_state_kill(uctx->cbs);
157d80efd5cSThomas Hellstrom 		(void) res->func->destroy(res);
158c8e5e010SThomas Hellstrom 		mutex_unlock(&dev_priv->binding_mutex);
15958a0c5f0SThomas Hellstrom 		if (dev_priv->pinned_bo != NULL &&
16058a0c5f0SThomas Hellstrom 		    !dev_priv->query_cid_valid)
16158a0c5f0SThomas Hellstrom 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
16258a0c5f0SThomas Hellstrom 		mutex_unlock(&dev_priv->cmdbuf_mutex);
1635e8ec0d9SDeepak Rawat 		vmw_context_cotables_unref(dev_priv, uctx);
16458a0c5f0SThomas Hellstrom 		return;
16558a0c5f0SThomas Hellstrom 	}
16658a0c5f0SThomas Hellstrom 
167543831cfSThomas Hellstrom 	vmw_execbuf_release_pinned_bo(dev_priv);
1688426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
16911c45419SDeepak Rawat 	if (unlikely(cmd == NULL))
170543831cfSThomas Hellstrom 		return;
171543831cfSThomas Hellstrom 
172b9eb1a61SThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
173b9eb1a61SThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
174b9eb1a61SThomas Hellstrom 	cmd->body.cid = res->id;
175543831cfSThomas Hellstrom 
1768426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
177153b3d5bSThomas Hellstrom 	vmw_fifo_resource_dec(dev_priv);
178543831cfSThomas Hellstrom }
179543831cfSThomas Hellstrom 
18058a0c5f0SThomas Hellstrom static int vmw_gb_context_init(struct vmw_private *dev_priv,
181d80efd5cSThomas Hellstrom 			       bool dx,
18258a0c5f0SThomas Hellstrom 			       struct vmw_resource *res,
18358a0c5f0SThomas Hellstrom 			       void (*res_free)(struct vmw_resource *res))
18458a0c5f0SThomas Hellstrom {
185d80efd5cSThomas Hellstrom 	int ret, i;
186173fb7d4SThomas Hellstrom 	struct vmw_user_context *uctx =
187173fb7d4SThomas Hellstrom 		container_of(res, struct vmw_user_context, res);
18858a0c5f0SThomas Hellstrom 
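	/*
	 * The backing MOB must hold the device's context save area:
	 * SVGADXContextMobFormat for DX contexts, SVGAGBContextData
	 * otherwise.
	 */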
189*668b2066SZack Rusin 	res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
190ebc9ac7cSZack Rusin 				 sizeof(SVGAGBContextData));
19158a0c5f0SThomas Hellstrom 	ret = vmw_resource_init(dev_priv, res, true,
192d80efd5cSThomas Hellstrom 				res_free,
193d80efd5cSThomas Hellstrom 				dx ? &vmw_dx_context_func :
194d80efd5cSThomas Hellstrom 				&vmw_gb_context_func);
19518e4a466SThomas Hellstrom 	if (unlikely(ret != 0))
19618e4a466SThomas Hellstrom 		goto out_err;
19758a0c5f0SThomas Hellstrom 
19818e4a466SThomas Hellstrom 	if (dev_priv->has_mob) {
19918e4a466SThomas Hellstrom 		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
20055579cfeSViresh Kumar 		if (IS_ERR(uctx->man)) {
20118e4a466SThomas Hellstrom 			ret = PTR_ERR(uctx->man);
20218e4a466SThomas Hellstrom 			uctx->man = NULL;
20318e4a466SThomas Hellstrom 			goto out_err;
20418e4a466SThomas Hellstrom 		}
20558a0c5f0SThomas Hellstrom 	}
20658a0c5f0SThomas Hellstrom 
207d80efd5cSThomas Hellstrom 	uctx->cbs = vmw_binding_state_alloc(dev_priv);
208d80efd5cSThomas Hellstrom 	if (IS_ERR(uctx->cbs)) {
209d80efd5cSThomas Hellstrom 		ret = PTR_ERR(uctx->cbs);
210d80efd5cSThomas Hellstrom 		goto out_err;
211d80efd5cSThomas Hellstrom 	}
212d80efd5cSThomas Hellstrom 
213d80efd5cSThomas Hellstrom 	spin_lock_init(&uctx->cotable_lock);
214d80efd5cSThomas Hellstrom 
215d80efd5cSThomas Hellstrom 	if (dx) {
2165e8ec0d9SDeepak Rawat 		u32 cotable_max = has_sm5_context(dev_priv) ?
2175e8ec0d9SDeepak Rawat 			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
2185e8ec0d9SDeepak Rawat 		for (i = 0; i < cotable_max; ++i) {
219d80efd5cSThomas Hellstrom 			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
220d80efd5cSThomas Hellstrom 							      &uctx->res, i);
2214efa6661SChengguang Xu 			if (IS_ERR(uctx->cotables[i])) {
222d7f48231SChristophe JAILLET 				ret = PTR_ERR(uctx->cotables[i]);
223d80efd5cSThomas Hellstrom 				goto out_cotables;
224d80efd5cSThomas Hellstrom 			}
225d80efd5cSThomas Hellstrom 		}
226d80efd5cSThomas Hellstrom 	}
227d80efd5cSThomas Hellstrom 
22813289241SThomas Hellstrom 	res->hw_destroy = vmw_hw_context_destroy;
22958a0c5f0SThomas Hellstrom 	return 0;
23018e4a466SThomas Hellstrom 
231d80efd5cSThomas Hellstrom out_cotables:
2325e8ec0d9SDeepak Rawat 	vmw_context_cotables_unref(dev_priv, uctx);
23318e4a466SThomas Hellstrom out_err:
23418e4a466SThomas Hellstrom 	if (res_free)
23518e4a466SThomas Hellstrom 		res_free(res);
23618e4a466SThomas Hellstrom 	else
23718e4a466SThomas Hellstrom 		kfree(res);
23818e4a466SThomas Hellstrom 	return ret;
23958a0c5f0SThomas Hellstrom }
24058a0c5f0SThomas Hellstrom 
241543831cfSThomas Hellstrom static int vmw_context_init(struct vmw_private *dev_priv,
242543831cfSThomas Hellstrom 			    struct vmw_resource *res,
243d80efd5cSThomas Hellstrom 			    void (*res_free)(struct vmw_resource *res),
244d80efd5cSThomas Hellstrom 			    bool dx)
245543831cfSThomas Hellstrom {
246543831cfSThomas Hellstrom 	int ret;
247543831cfSThomas Hellstrom 
248543831cfSThomas Hellstrom 	struct {
249543831cfSThomas Hellstrom 		SVGA3dCmdHeader header;
250543831cfSThomas Hellstrom 		SVGA3dCmdDefineContext body;
251543831cfSThomas Hellstrom 	} *cmd;
252543831cfSThomas Hellstrom 
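	/*
	 * On devices with MOB support the context is created as a
	 * guest-backed (and possibly DX) resource; the legacy
	 * SVGA_3D_CMD_CONTEXT_DEFINE path below is used otherwise.
	 */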
25358a0c5f0SThomas Hellstrom 	if (dev_priv->has_mob)
254d80efd5cSThomas Hellstrom 		return vmw_gb_context_init(dev_priv, dx, res, res_free);
25558a0c5f0SThomas Hellstrom 
256543831cfSThomas Hellstrom 	ret = vmw_resource_init(dev_priv, res, false,
257543831cfSThomas Hellstrom 				res_free, &vmw_legacy_context_func);
258543831cfSThomas Hellstrom 
259543831cfSThomas Hellstrom 	if (unlikely(ret != 0)) {
260543831cfSThomas Hellstrom 		DRM_ERROR("Failed to allocate a resource id.\n");
261543831cfSThomas Hellstrom 		goto out_early;
262543831cfSThomas Hellstrom 	}
263543831cfSThomas Hellstrom 
264ebc9ac7cSZack Rusin 	if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
265543831cfSThomas Hellstrom 		DRM_ERROR("Out of hw context ids.\n");
266543831cfSThomas Hellstrom 		vmw_resource_unreference(&res);
267543831cfSThomas Hellstrom 		return -ENOMEM;
268543831cfSThomas Hellstrom 	}
269543831cfSThomas Hellstrom 
2708426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
271543831cfSThomas Hellstrom 	if (unlikely(cmd == NULL)) {
272543831cfSThomas Hellstrom 		vmw_resource_unreference(&res);
273543831cfSThomas Hellstrom 		return -ENOMEM;
274543831cfSThomas Hellstrom 	}
275543831cfSThomas Hellstrom 
276b9eb1a61SThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
277b9eb1a61SThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
278b9eb1a61SThomas Hellstrom 	cmd->body.cid = res->id;
279543831cfSThomas Hellstrom 
2808426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
281153b3d5bSThomas Hellstrom 	vmw_fifo_resource_inc(dev_priv);
28213289241SThomas Hellstrom 	res->hw_destroy = vmw_hw_context_destroy;
283543831cfSThomas Hellstrom 	return 0;
284543831cfSThomas Hellstrom 
285543831cfSThomas Hellstrom out_early:
286543831cfSThomas Hellstrom 	if (res_free == NULL)
287543831cfSThomas Hellstrom 		kfree(res);
288543831cfSThomas Hellstrom 	else
289543831cfSThomas Hellstrom 		res_free(res);
290543831cfSThomas Hellstrom 	return ret;
291543831cfSThomas Hellstrom }
292543831cfSThomas Hellstrom 
293543831cfSThomas Hellstrom 
294d80efd5cSThomas Hellstrom /*
295d80efd5cSThomas Hellstrom  * GB context.
296d80efd5cSThomas Hellstrom  */
29758a0c5f0SThomas Hellstrom 
29858a0c5f0SThomas Hellstrom static int vmw_gb_context_create(struct vmw_resource *res)
29958a0c5f0SThomas Hellstrom {
30058a0c5f0SThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
30158a0c5f0SThomas Hellstrom 	int ret;
30258a0c5f0SThomas Hellstrom 	struct {
30358a0c5f0SThomas Hellstrom 		SVGA3dCmdHeader header;
30458a0c5f0SThomas Hellstrom 		SVGA3dCmdDefineGBContext body;
30558a0c5f0SThomas Hellstrom 	} *cmd;
30658a0c5f0SThomas Hellstrom 
30758a0c5f0SThomas Hellstrom 	if (likely(res->id != -1))
30858a0c5f0SThomas Hellstrom 		return 0;
30958a0c5f0SThomas Hellstrom 
31058a0c5f0SThomas Hellstrom 	ret = vmw_resource_alloc_id(res);
31158a0c5f0SThomas Hellstrom 	if (unlikely(ret != 0)) {
31258a0c5f0SThomas Hellstrom 		DRM_ERROR("Failed to allocate a context id.\n");
31358a0c5f0SThomas Hellstrom 		goto out_no_id;
31458a0c5f0SThomas Hellstrom 	}
31558a0c5f0SThomas Hellstrom 
31658a0c5f0SThomas Hellstrom 	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
31758a0c5f0SThomas Hellstrom 		ret = -EBUSY;
31858a0c5f0SThomas Hellstrom 		goto out_no_fifo;
31958a0c5f0SThomas Hellstrom 	}
32058a0c5f0SThomas Hellstrom 
3218426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
32258a0c5f0SThomas Hellstrom 	if (unlikely(cmd == NULL)) {
32358a0c5f0SThomas Hellstrom 		ret = -ENOMEM;
32458a0c5f0SThomas Hellstrom 		goto out_no_fifo;
32558a0c5f0SThomas Hellstrom 	}
32658a0c5f0SThomas Hellstrom 
32758a0c5f0SThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
32858a0c5f0SThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
32958a0c5f0SThomas Hellstrom 	cmd->body.cid = res->id;
3308426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
331153b3d5bSThomas Hellstrom 	vmw_fifo_resource_inc(dev_priv);
33258a0c5f0SThomas Hellstrom 
33358a0c5f0SThomas Hellstrom 	return 0;
33458a0c5f0SThomas Hellstrom 
33558a0c5f0SThomas Hellstrom out_no_fifo:
33658a0c5f0SThomas Hellstrom 	vmw_resource_release_id(res);
33758a0c5f0SThomas Hellstrom out_no_id:
33858a0c5f0SThomas Hellstrom 	return ret;
33958a0c5f0SThomas Hellstrom }
34058a0c5f0SThomas Hellstrom 
34158a0c5f0SThomas Hellstrom static int vmw_gb_context_bind(struct vmw_resource *res,
34258a0c5f0SThomas Hellstrom 			       struct ttm_validate_buffer *val_buf)
34358a0c5f0SThomas Hellstrom {
34458a0c5f0SThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
34558a0c5f0SThomas Hellstrom 	struct {
34658a0c5f0SThomas Hellstrom 		SVGA3dCmdHeader header;
34758a0c5f0SThomas Hellstrom 		SVGA3dCmdBindGBContext body;
34858a0c5f0SThomas Hellstrom 	} *cmd;
34958a0c5f0SThomas Hellstrom 	struct ttm_buffer_object *bo = val_buf->bo;
35058a0c5f0SThomas Hellstrom 
351d3116756SChristian König 	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
35258a0c5f0SThomas Hellstrom 
3538426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
35411c45419SDeepak Rawat 	if (unlikely(cmd == NULL))
35558a0c5f0SThomas Hellstrom 		return -ENOMEM;
35611c45419SDeepak Rawat 
35758a0c5f0SThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
35858a0c5f0SThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
35958a0c5f0SThomas Hellstrom 	cmd->body.cid = res->id;
360d3116756SChristian König 	cmd->body.mobid = bo->resource->start;
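	/*
	 * validContents tells the device whether the MOB already holds
	 * saved context state; the dirty flag is consumed by this bind.
	 */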
361*668b2066SZack Rusin 	cmd->body.validContents = res->guest_memory_dirty;
362*668b2066SZack Rusin 	res->guest_memory_dirty = false;
3638426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
36458a0c5f0SThomas Hellstrom 
36558a0c5f0SThomas Hellstrom 	return 0;
36658a0c5f0SThomas Hellstrom }
36758a0c5f0SThomas Hellstrom 
36858a0c5f0SThomas Hellstrom static int vmw_gb_context_unbind(struct vmw_resource *res,
36958a0c5f0SThomas Hellstrom 				 bool readback,
37058a0c5f0SThomas Hellstrom 				 struct ttm_validate_buffer *val_buf)
37158a0c5f0SThomas Hellstrom {
37258a0c5f0SThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
37358a0c5f0SThomas Hellstrom 	struct ttm_buffer_object *bo = val_buf->bo;
37458a0c5f0SThomas Hellstrom 	struct vmw_fence_obj *fence;
375173fb7d4SThomas Hellstrom 	struct vmw_user_context *uctx =
376173fb7d4SThomas Hellstrom 		container_of(res, struct vmw_user_context, res);
37758a0c5f0SThomas Hellstrom 
37858a0c5f0SThomas Hellstrom 	struct {
37958a0c5f0SThomas Hellstrom 		SVGA3dCmdHeader header;
38058a0c5f0SThomas Hellstrom 		SVGA3dCmdReadbackGBContext body;
38158a0c5f0SThomas Hellstrom 	} *cmd1;
38258a0c5f0SThomas Hellstrom 	struct {
38358a0c5f0SThomas Hellstrom 		SVGA3dCmdHeader header;
38458a0c5f0SThomas Hellstrom 		SVGA3dCmdBindGBContext body;
38558a0c5f0SThomas Hellstrom 	} *cmd2;
38658a0c5f0SThomas Hellstrom 	uint32_t submit_size;
38758a0c5f0SThomas Hellstrom 	uint8_t *cmd;
38858a0c5f0SThomas Hellstrom 
38958a0c5f0SThomas Hellstrom 
390d3116756SChristian König 	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
39158a0c5f0SThomas Hellstrom 
392173fb7d4SThomas Hellstrom 	mutex_lock(&dev_priv->binding_mutex);
393d80efd5cSThomas Hellstrom 	vmw_binding_state_scrub(uctx->cbs);
394173fb7d4SThomas Hellstrom 
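	/*
	 * Bindings are scrubbed before the context loses its MOB. With
	 * readback, the context state is first read back into the MOB,
	 * then the bind to SVGA3D_INVALID_ID detaches it.
	 */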
39558a0c5f0SThomas Hellstrom 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
39658a0c5f0SThomas Hellstrom 
3978426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
39858a0c5f0SThomas Hellstrom 	if (unlikely(cmd == NULL)) {
399173fb7d4SThomas Hellstrom 		mutex_unlock(&dev_priv->binding_mutex);
40058a0c5f0SThomas Hellstrom 		return -ENOMEM;
40158a0c5f0SThomas Hellstrom 	}
40258a0c5f0SThomas Hellstrom 
40358a0c5f0SThomas Hellstrom 	cmd2 = (void *) cmd;
40458a0c5f0SThomas Hellstrom 	if (readback) {
40558a0c5f0SThomas Hellstrom 		cmd1 = (void *) cmd;
40658a0c5f0SThomas Hellstrom 		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
40758a0c5f0SThomas Hellstrom 		cmd1->header.size = sizeof(cmd1->body);
40858a0c5f0SThomas Hellstrom 		cmd1->body.cid = res->id;
40958a0c5f0SThomas Hellstrom 		cmd2 = (void *) (&cmd1[1]);
41058a0c5f0SThomas Hellstrom 	}
41158a0c5f0SThomas Hellstrom 	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
41258a0c5f0SThomas Hellstrom 	cmd2->header.size = sizeof(cmd2->body);
41358a0c5f0SThomas Hellstrom 	cmd2->body.cid = res->id;
41458a0c5f0SThomas Hellstrom 	cmd2->body.mobid = SVGA3D_INVALID_ID;
41558a0c5f0SThomas Hellstrom 
4168426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, submit_size);
417173fb7d4SThomas Hellstrom 	mutex_unlock(&dev_priv->binding_mutex);
41858a0c5f0SThomas Hellstrom 
41958a0c5f0SThomas Hellstrom 	/*
42058a0c5f0SThomas Hellstrom 	 * Create a fence object and fence the backup buffer.
42158a0c5f0SThomas Hellstrom 	 */
42258a0c5f0SThomas Hellstrom 
42358a0c5f0SThomas Hellstrom 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
42458a0c5f0SThomas Hellstrom 					  &fence, NULL);
42558a0c5f0SThomas Hellstrom 
426e9431ea5SThomas Hellstrom 	vmw_bo_fence_single(bo, fence);
42758a0c5f0SThomas Hellstrom 
42858a0c5f0SThomas Hellstrom 	if (likely(fence != NULL))
42958a0c5f0SThomas Hellstrom 		vmw_fence_obj_unreference(&fence);
43058a0c5f0SThomas Hellstrom 
43158a0c5f0SThomas Hellstrom 	return 0;
43258a0c5f0SThomas Hellstrom }
43358a0c5f0SThomas Hellstrom 
43458a0c5f0SThomas Hellstrom static int vmw_gb_context_destroy(struct vmw_resource *res)
43558a0c5f0SThomas Hellstrom {
43658a0c5f0SThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
43758a0c5f0SThomas Hellstrom 	struct {
43858a0c5f0SThomas Hellstrom 		SVGA3dCmdHeader header;
43958a0c5f0SThomas Hellstrom 		SVGA3dCmdDestroyGBContext body;
44058a0c5f0SThomas Hellstrom 	} *cmd;
44158a0c5f0SThomas Hellstrom 
44258a0c5f0SThomas Hellstrom 	if (likely(res->id == -1))
44358a0c5f0SThomas Hellstrom 		return 0;
44458a0c5f0SThomas Hellstrom 
4458426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
44611c45419SDeepak Rawat 	if (unlikely(cmd == NULL))
44758a0c5f0SThomas Hellstrom 		return -ENOMEM;
44858a0c5f0SThomas Hellstrom 
44958a0c5f0SThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
45058a0c5f0SThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
45158a0c5f0SThomas Hellstrom 	cmd->body.cid = res->id;
4528426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
45358a0c5f0SThomas Hellstrom 	if (dev_priv->query_cid == res->id)
45458a0c5f0SThomas Hellstrom 		dev_priv->query_cid_valid = false;
45558a0c5f0SThomas Hellstrom 	vmw_resource_release_id(res);
456153b3d5bSThomas Hellstrom 	vmw_fifo_resource_dec(dev_priv);
45758a0c5f0SThomas Hellstrom 
45858a0c5f0SThomas Hellstrom 	return 0;
45958a0c5f0SThomas Hellstrom }
46058a0c5f0SThomas Hellstrom 
461d80efd5cSThomas Hellstrom /*
462d80efd5cSThomas Hellstrom  * DX context.
463d80efd5cSThomas Hellstrom  */
464d80efd5cSThomas Hellstrom 
465d80efd5cSThomas Hellstrom static int vmw_dx_context_create(struct vmw_resource *res)
466d80efd5cSThomas Hellstrom {
467d80efd5cSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
468d80efd5cSThomas Hellstrom 	int ret;
469d80efd5cSThomas Hellstrom 	struct {
470d80efd5cSThomas Hellstrom 		SVGA3dCmdHeader header;
471d80efd5cSThomas Hellstrom 		SVGA3dCmdDXDefineContext body;
472d80efd5cSThomas Hellstrom 	} *cmd;
473d80efd5cSThomas Hellstrom 
474d80efd5cSThomas Hellstrom 	if (likely(res->id != -1))
475d80efd5cSThomas Hellstrom 		return 0;
476d80efd5cSThomas Hellstrom 
477d80efd5cSThomas Hellstrom 	ret = vmw_resource_alloc_id(res);
478d80efd5cSThomas Hellstrom 	if (unlikely(ret != 0)) {
479d80efd5cSThomas Hellstrom 		DRM_ERROR("Failed to allocate a context id.\n");
480d80efd5cSThomas Hellstrom 		goto out_no_id;
481d80efd5cSThomas Hellstrom 	}
482d80efd5cSThomas Hellstrom 
483d80efd5cSThomas Hellstrom 	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
484d80efd5cSThomas Hellstrom 		ret = -EBUSY;
485d80efd5cSThomas Hellstrom 		goto out_no_fifo;
486d80efd5cSThomas Hellstrom 	}
487d80efd5cSThomas Hellstrom 
4888426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
489d80efd5cSThomas Hellstrom 	if (unlikely(cmd == NULL)) {
490d80efd5cSThomas Hellstrom 		ret = -ENOMEM;
491d80efd5cSThomas Hellstrom 		goto out_no_fifo;
492d80efd5cSThomas Hellstrom 	}
493d80efd5cSThomas Hellstrom 
494d80efd5cSThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
495d80efd5cSThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
496d80efd5cSThomas Hellstrom 	cmd->body.cid = res->id;
4978426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
498d80efd5cSThomas Hellstrom 	vmw_fifo_resource_inc(dev_priv);
499d80efd5cSThomas Hellstrom 
500d80efd5cSThomas Hellstrom 	return 0;
501d80efd5cSThomas Hellstrom 
502d80efd5cSThomas Hellstrom out_no_fifo:
503d80efd5cSThomas Hellstrom 	vmw_resource_release_id(res);
504d80efd5cSThomas Hellstrom out_no_id:
505d80efd5cSThomas Hellstrom 	return ret;
506d80efd5cSThomas Hellstrom }
507d80efd5cSThomas Hellstrom 
508d80efd5cSThomas Hellstrom static int vmw_dx_context_bind(struct vmw_resource *res,
509d80efd5cSThomas Hellstrom 			       struct ttm_validate_buffer *val_buf)
510d80efd5cSThomas Hellstrom {
511d80efd5cSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
512d80efd5cSThomas Hellstrom 	struct {
513d80efd5cSThomas Hellstrom 		SVGA3dCmdHeader header;
514d80efd5cSThomas Hellstrom 		SVGA3dCmdDXBindContext body;
515d80efd5cSThomas Hellstrom 	} *cmd;
516d80efd5cSThomas Hellstrom 	struct ttm_buffer_object *bo = val_buf->bo;
517d80efd5cSThomas Hellstrom 
518d3116756SChristian König 	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
519d80efd5cSThomas Hellstrom 
5208426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
52111c45419SDeepak Rawat 	if (unlikely(cmd == NULL))
522d80efd5cSThomas Hellstrom 		return -ENOMEM;
523d80efd5cSThomas Hellstrom 
524d80efd5cSThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
525d80efd5cSThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
526d80efd5cSThomas Hellstrom 	cmd->body.cid = res->id;
527d3116756SChristian König 	cmd->body.mobid = bo->resource->start;
528*668b2066SZack Rusin 	cmd->body.validContents = res->guest_memory_dirty;
529*668b2066SZack Rusin 	res->guest_memory_dirty = false;
5308426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
531d80efd5cSThomas Hellstrom 
532d80efd5cSThomas Hellstrom 
533d80efd5cSThomas Hellstrom 	return 0;
534d80efd5cSThomas Hellstrom }
535d80efd5cSThomas Hellstrom 
536d80efd5cSThomas Hellstrom /**
537d80efd5cSThomas Hellstrom  * vmw_dx_context_scrub_cotables - Scrub all bindings and
538d80efd5cSThomas Hellstrom  * cotables from a context
539d80efd5cSThomas Hellstrom  *
540d80efd5cSThomas Hellstrom  * @ctx: Pointer to the context resource
541d80efd5cSThomas Hellstrom  * @readback: Whether to save the cotable contents on scrubbing.
542d80efd5cSThomas Hellstrom  *
543d80efd5cSThomas Hellstrom  * COtables must be unbound before their context, but unbinding requires
544d80efd5cSThomas Hellstrom  * the backup buffer being reserved, whereas scrubbing does not.
545d80efd5cSThomas Hellstrom  * This function scrubs all cotables of a context, potentially reading back
546d80efd5cSThomas Hellstrom  * the contents into their backup buffers. However, scrubbing cotables
547d80efd5cSThomas Hellstrom  * also makes the device context invalid, so scrub all bindings first so
548d80efd5cSThomas Hellstrom  * that doesn't have to be done later with an invalid context.
549d80efd5cSThomas Hellstrom  */
550d80efd5cSThomas Hellstrom void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
551d80efd5cSThomas Hellstrom 				   bool readback)
552d80efd5cSThomas Hellstrom {
553d80efd5cSThomas Hellstrom 	struct vmw_user_context *uctx =
554d80efd5cSThomas Hellstrom 		container_of(ctx, struct vmw_user_context, res);
5555e8ec0d9SDeepak Rawat 	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
5565e8ec0d9SDeepak Rawat 		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
557d80efd5cSThomas Hellstrom 	int i;
558d80efd5cSThomas Hellstrom 
559d80efd5cSThomas Hellstrom 	vmw_binding_state_scrub(uctx->cbs);
5605e8ec0d9SDeepak Rawat 	for (i = 0; i < cotable_max; ++i) {
561d80efd5cSThomas Hellstrom 		struct vmw_resource *res;
562d80efd5cSThomas Hellstrom 
563d80efd5cSThomas Hellstrom 		/* Avoid racing with ongoing cotable destruction. */
564d80efd5cSThomas Hellstrom 		spin_lock(&uctx->cotable_lock);
565d80efd5cSThomas Hellstrom 		res = uctx->cotables[vmw_cotable_scrub_order[i]];
566d80efd5cSThomas Hellstrom 		if (res)
567d80efd5cSThomas Hellstrom 			res = vmw_resource_reference_unless_doomed(res);
568d80efd5cSThomas Hellstrom 		spin_unlock(&uctx->cotable_lock);
569d80efd5cSThomas Hellstrom 		if (!res)
570d80efd5cSThomas Hellstrom 			continue;
571d80efd5cSThomas Hellstrom 
572d80efd5cSThomas Hellstrom 		WARN_ON(vmw_cotable_scrub(res, readback));
573d80efd5cSThomas Hellstrom 		vmw_resource_unreference(&res);
574d80efd5cSThomas Hellstrom 	}
575d80efd5cSThomas Hellstrom }
576d80efd5cSThomas Hellstrom 
577d80efd5cSThomas Hellstrom static int vmw_dx_context_unbind(struct vmw_resource *res,
578d80efd5cSThomas Hellstrom 				 bool readback,
579d80efd5cSThomas Hellstrom 				 struct ttm_validate_buffer *val_buf)
580d80efd5cSThomas Hellstrom {
581d80efd5cSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
582d80efd5cSThomas Hellstrom 	struct ttm_buffer_object *bo = val_buf->bo;
583d80efd5cSThomas Hellstrom 	struct vmw_fence_obj *fence;
584fd11a3c0SSinclair Yeh 	struct vmw_user_context *uctx =
585fd11a3c0SSinclair Yeh 		container_of(res, struct vmw_user_context, res);
586d80efd5cSThomas Hellstrom 
587d80efd5cSThomas Hellstrom 	struct {
588d80efd5cSThomas Hellstrom 		SVGA3dCmdHeader header;
589d80efd5cSThomas Hellstrom 		SVGA3dCmdDXReadbackContext body;
590d80efd5cSThomas Hellstrom 	} *cmd1;
591d80efd5cSThomas Hellstrom 	struct {
592d80efd5cSThomas Hellstrom 		SVGA3dCmdHeader header;
593d80efd5cSThomas Hellstrom 		SVGA3dCmdDXBindContext body;
594d80efd5cSThomas Hellstrom 	} *cmd2;
595d80efd5cSThomas Hellstrom 	uint32_t submit_size;
596d80efd5cSThomas Hellstrom 	uint8_t *cmd;
597d80efd5cSThomas Hellstrom 
598d80efd5cSThomas Hellstrom 
599d3116756SChristian König 	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
600d80efd5cSThomas Hellstrom 
601d80efd5cSThomas Hellstrom 	mutex_lock(&dev_priv->binding_mutex);
602d80efd5cSThomas Hellstrom 	vmw_dx_context_scrub_cotables(res, readback);
603d80efd5cSThomas Hellstrom 
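	/*
	 * If a query MOB is bound to this context, pending query
	 * results are read back before the context is detached from
	 * its MOB.
	 */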
604fd11a3c0SSinclair Yeh 	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
605fd11a3c0SSinclair Yeh 	    readback) {
606fd11a3c0SSinclair Yeh 		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
607fd11a3c0SSinclair Yeh 		if (vmw_query_readback_all(uctx->dx_query_mob))
608fd11a3c0SSinclair Yeh 			DRM_ERROR("Failed to read back query states\n");
609fd11a3c0SSinclair Yeh 	}
610fd11a3c0SSinclair Yeh 
611d80efd5cSThomas Hellstrom 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
612d80efd5cSThomas Hellstrom 
6138426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
614d80efd5cSThomas Hellstrom 	if (unlikely(cmd == NULL)) {
615d80efd5cSThomas Hellstrom 		mutex_unlock(&dev_priv->binding_mutex);
616d80efd5cSThomas Hellstrom 		return -ENOMEM;
617d80efd5cSThomas Hellstrom 	}
618d80efd5cSThomas Hellstrom 
619d80efd5cSThomas Hellstrom 	cmd2 = (void *) cmd;
620d80efd5cSThomas Hellstrom 	if (readback) {
621d80efd5cSThomas Hellstrom 		cmd1 = (void *) cmd;
622d80efd5cSThomas Hellstrom 		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
623d80efd5cSThomas Hellstrom 		cmd1->header.size = sizeof(cmd1->body);
624d80efd5cSThomas Hellstrom 		cmd1->body.cid = res->id;
625d80efd5cSThomas Hellstrom 		cmd2 = (void *) (&cmd1[1]);
626d80efd5cSThomas Hellstrom 	}
627d80efd5cSThomas Hellstrom 	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
628d80efd5cSThomas Hellstrom 	cmd2->header.size = sizeof(cmd2->body);
629d80efd5cSThomas Hellstrom 	cmd2->body.cid = res->id;
630d80efd5cSThomas Hellstrom 	cmd2->body.mobid = SVGA3D_INVALID_ID;
631d80efd5cSThomas Hellstrom 
6328426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, submit_size);
633d80efd5cSThomas Hellstrom 	mutex_unlock(&dev_priv->binding_mutex);
634d80efd5cSThomas Hellstrom 
635d80efd5cSThomas Hellstrom 	/*
636d80efd5cSThomas Hellstrom 	 * Create a fence object and fence the backup buffer.
637d80efd5cSThomas Hellstrom 	 */
638d80efd5cSThomas Hellstrom 
639d80efd5cSThomas Hellstrom 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
640d80efd5cSThomas Hellstrom 					  &fence, NULL);
641d80efd5cSThomas Hellstrom 
642e9431ea5SThomas Hellstrom 	vmw_bo_fence_single(bo, fence);
643d80efd5cSThomas Hellstrom 
644d80efd5cSThomas Hellstrom 	if (likely(fence != NULL))
645d80efd5cSThomas Hellstrom 		vmw_fence_obj_unreference(&fence);
646d80efd5cSThomas Hellstrom 
647d80efd5cSThomas Hellstrom 	return 0;
648d80efd5cSThomas Hellstrom }
649d80efd5cSThomas Hellstrom 
650d80efd5cSThomas Hellstrom static int vmw_dx_context_destroy(struct vmw_resource *res)
651d80efd5cSThomas Hellstrom {
652d80efd5cSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
653d80efd5cSThomas Hellstrom 	struct {
654d80efd5cSThomas Hellstrom 		SVGA3dCmdHeader header;
655d80efd5cSThomas Hellstrom 		SVGA3dCmdDXDestroyContext body;
656d80efd5cSThomas Hellstrom 	} *cmd;
657d80efd5cSThomas Hellstrom 
658d80efd5cSThomas Hellstrom 	if (likely(res->id == -1))
659d80efd5cSThomas Hellstrom 		return 0;
660d80efd5cSThomas Hellstrom 
6618426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
66211c45419SDeepak Rawat 	if (unlikely(cmd == NULL))
663d80efd5cSThomas Hellstrom 		return -ENOMEM;
664d80efd5cSThomas Hellstrom 
665d80efd5cSThomas Hellstrom 	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
666d80efd5cSThomas Hellstrom 	cmd->header.size = sizeof(cmd->body);
667d80efd5cSThomas Hellstrom 	cmd->body.cid = res->id;
6688426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
669d80efd5cSThomas Hellstrom 	if (dev_priv->query_cid == res->id)
670d80efd5cSThomas Hellstrom 		dev_priv->query_cid_valid = false;
671d80efd5cSThomas Hellstrom 	vmw_resource_release_id(res);
672d80efd5cSThomas Hellstrom 	vmw_fifo_resource_dec(dev_priv);
673543831cfSThomas Hellstrom 
674543831cfSThomas Hellstrom 	return 0;
675543831cfSThomas Hellstrom }
676543831cfSThomas Hellstrom 
677ea716197SLee Jones /*
678543831cfSThomas Hellstrom  * User-space context management:
679543831cfSThomas Hellstrom  */
680543831cfSThomas Hellstrom 
681543831cfSThomas Hellstrom static struct vmw_resource *
682543831cfSThomas Hellstrom vmw_user_context_base_to_res(struct ttm_base_object *base)
683543831cfSThomas Hellstrom {
684543831cfSThomas Hellstrom 	return &(container_of(base, struct vmw_user_context, base)->res);
685543831cfSThomas Hellstrom }
686543831cfSThomas Hellstrom 
687543831cfSThomas Hellstrom static void vmw_user_context_free(struct vmw_resource *res)
688543831cfSThomas Hellstrom {
689543831cfSThomas Hellstrom 	struct vmw_user_context *ctx =
690543831cfSThomas Hellstrom 	    container_of(res, struct vmw_user_context, res);
691543831cfSThomas Hellstrom 
692d80efd5cSThomas Hellstrom 	if (ctx->cbs)
693d80efd5cSThomas Hellstrom 		vmw_binding_state_free(ctx->cbs);
694fd11a3c0SSinclair Yeh 
695fd11a3c0SSinclair Yeh 	(void) vmw_context_bind_dx_query(res, NULL);
696fd11a3c0SSinclair Yeh 
697543831cfSThomas Hellstrom 	ttm_base_object_kfree(ctx, base);
698543831cfSThomas Hellstrom }
699543831cfSThomas Hellstrom 
700ea716197SLee Jones /*
701543831cfSThomas Hellstrom  * This function is called when user space has no more references on the
702543831cfSThomas Hellstrom  * base object. It releases the base-object's reference on the resource object.
703543831cfSThomas Hellstrom  */
704543831cfSThomas Hellstrom 
705543831cfSThomas Hellstrom static void vmw_user_context_base_release(struct ttm_base_object **p_base)
706543831cfSThomas Hellstrom {
707543831cfSThomas Hellstrom 	struct ttm_base_object *base = *p_base;
708543831cfSThomas Hellstrom 	struct vmw_user_context *ctx =
709543831cfSThomas Hellstrom 	    container_of(base, struct vmw_user_context, base);
710543831cfSThomas Hellstrom 	struct vmw_resource *res = &ctx->res;
711543831cfSThomas Hellstrom 
712543831cfSThomas Hellstrom 	*p_base = NULL;
713543831cfSThomas Hellstrom 	vmw_resource_unreference(&res);
714543831cfSThomas Hellstrom }
715543831cfSThomas Hellstrom 
716543831cfSThomas Hellstrom int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
717543831cfSThomas Hellstrom 			      struct drm_file *file_priv)
718543831cfSThomas Hellstrom {
719543831cfSThomas Hellstrom 	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
720543831cfSThomas Hellstrom 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
721543831cfSThomas Hellstrom 
7228afa13a0SZack Rusin 	return ttm_ref_object_base_unref(tfile, arg->cid);
723543831cfSThomas Hellstrom }
724543831cfSThomas Hellstrom 
725d80efd5cSThomas Hellstrom static int vmw_context_define(struct drm_device *dev, void *data,
726d80efd5cSThomas Hellstrom 			      struct drm_file *file_priv, bool dx)
727543831cfSThomas Hellstrom {
728543831cfSThomas Hellstrom 	struct vmw_private *dev_priv = vmw_priv(dev);
729543831cfSThomas Hellstrom 	struct vmw_user_context *ctx;
730543831cfSThomas Hellstrom 	struct vmw_resource *res;
731543831cfSThomas Hellstrom 	struct vmw_resource *tmp;
732543831cfSThomas Hellstrom 	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
733543831cfSThomas Hellstrom 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
734543831cfSThomas Hellstrom 	int ret;
735543831cfSThomas Hellstrom 
736878c6ecdSDeepak Rawat 	if (!has_sm4_context(dev_priv) && dx) {
7375724f899SDeepak Rawat 		VMW_DEBUG_USER("DX contexts not supported by device.\n");
738d80efd5cSThomas Hellstrom 		return -EINVAL;
739d80efd5cSThomas Hellstrom 	}
740543831cfSThomas Hellstrom 
741543831cfSThomas Hellstrom 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7421a4adb05SRavikant B Sharma 	if (unlikely(!ctx)) {
743543831cfSThomas Hellstrom 		ret = -ENOMEM;
7448211783fSZack Rusin 		goto out_ret;
745543831cfSThomas Hellstrom 	}
746543831cfSThomas Hellstrom 
747543831cfSThomas Hellstrom 	res = &ctx->res;
748543831cfSThomas Hellstrom 	ctx->base.shareable = false;
749543831cfSThomas Hellstrom 	ctx->base.tfile = NULL;
750543831cfSThomas Hellstrom 
751543831cfSThomas Hellstrom 	/*
752543831cfSThomas Hellstrom 	 * From here on, the destructor takes over resource freeing.
753543831cfSThomas Hellstrom 	 */
754543831cfSThomas Hellstrom 
755d80efd5cSThomas Hellstrom 	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
756543831cfSThomas Hellstrom 	if (unlikely(ret != 0))
7578211783fSZack Rusin 		goto out_ret;
758543831cfSThomas Hellstrom 
759543831cfSThomas Hellstrom 	tmp = vmw_resource_reference(&ctx->res);
760543831cfSThomas Hellstrom 	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
7618afa13a0SZack Rusin 				   &vmw_user_context_base_release);
762543831cfSThomas Hellstrom 
763543831cfSThomas Hellstrom 	if (unlikely(ret != 0)) {
764543831cfSThomas Hellstrom 		vmw_resource_unreference(&tmp);
765543831cfSThomas Hellstrom 		goto out_err;
766543831cfSThomas Hellstrom 	}
767543831cfSThomas Hellstrom 
768c7eae626SThomas Hellstrom 	arg->cid = ctx->base.handle;
769543831cfSThomas Hellstrom out_err:
770543831cfSThomas Hellstrom 	vmw_resource_unreference(&res);
7718211783fSZack Rusin out_ret:
772543831cfSThomas Hellstrom 	return ret;
773543831cfSThomas Hellstrom }
774b5c3b1a6SThomas Hellstrom 
775d80efd5cSThomas Hellstrom int vmw_context_define_ioctl(struct drm_device *dev, void *data,
776d80efd5cSThomas Hellstrom 			     struct drm_file *file_priv)
777b5c3b1a6SThomas Hellstrom {
778d80efd5cSThomas Hellstrom 	return vmw_context_define(dev, data, file_priv, false);
779b5c3b1a6SThomas Hellstrom }
780b5c3b1a6SThomas Hellstrom 
781d80efd5cSThomas Hellstrom int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
782d80efd5cSThomas Hellstrom 				      struct drm_file *file_priv)
783b5c3b1a6SThomas Hellstrom {
784d80efd5cSThomas Hellstrom 	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
785d80efd5cSThomas Hellstrom 	struct drm_vmw_context_arg *rep = &arg->rep;
786b5c3b1a6SThomas Hellstrom 
787d80efd5cSThomas Hellstrom 	switch (arg->req) {
788d80efd5cSThomas Hellstrom 	case drm_vmw_context_legacy:
789d80efd5cSThomas Hellstrom 		return vmw_context_define(dev, rep, file_priv, false);
790d80efd5cSThomas Hellstrom 	case drm_vmw_context_dx:
791d80efd5cSThomas Hellstrom 		return vmw_context_define(dev, rep, file_priv, true);
792b5c3b1a6SThomas Hellstrom 	default:
793173fb7d4SThomas Hellstrom 		break;
794173fb7d4SThomas Hellstrom 	}
795d80efd5cSThomas Hellstrom 	return -EINVAL;
79630f82d81SThomas Hellstrom }
79730f82d81SThomas Hellstrom 
79830f82d81SThomas Hellstrom /**
79930f82d81SThomas Hellstrom  * vmw_context_binding_list - Return a list of context bindings
80030f82d81SThomas Hellstrom  *
80130f82d81SThomas Hellstrom  * @ctx: The context resource
80230f82d81SThomas Hellstrom  *
80330f82d81SThomas Hellstrom  * Returns the current list of bindings of the given context. Note that
80430f82d81SThomas Hellstrom  * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
80530f82d81SThomas Hellstrom  */
80630f82d81SThomas Hellstrom struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
80730f82d81SThomas Hellstrom {
808d80efd5cSThomas Hellstrom 	struct vmw_user_context *uctx =
809d80efd5cSThomas Hellstrom 		container_of(ctx, struct vmw_user_context, res);
810d80efd5cSThomas Hellstrom 
811d80efd5cSThomas Hellstrom 	return vmw_binding_state_list(uctx->cbs);
81230f82d81SThomas Hellstrom }
81318e4a466SThomas Hellstrom 
81418e4a466SThomas Hellstrom struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
81518e4a466SThomas Hellstrom {
81618e4a466SThomas Hellstrom 	return container_of(ctx, struct vmw_user_context, res)->man;
81718e4a466SThomas Hellstrom }
818d80efd5cSThomas Hellstrom 
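/**
 * vmw_context_cotable - Return the cotable resource of a given type for
 * a context.
 *
 * @ctx: The context resource.
 * @cotable_type: The cotable type to look up.
 *
 * Returns the cotable resource, or an ERR_PTR(-EINVAL) if @cotable_type
 * is out of range for the device.
 */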
819d80efd5cSThomas Hellstrom struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
820d80efd5cSThomas Hellstrom 					 SVGACOTableType cotable_type)
821d80efd5cSThomas Hellstrom {
8225e8ec0d9SDeepak Rawat 	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
8235e8ec0d9SDeepak Rawat 		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
8245e8ec0d9SDeepak Rawat 
8255e8ec0d9SDeepak Rawat 	if (cotable_type >= cotable_max)
826d80efd5cSThomas Hellstrom 		return ERR_PTR(-EINVAL);
827d80efd5cSThomas Hellstrom 
8281b9a01d6SThomas Hellstrom 	return container_of(ctx, struct vmw_user_context, res)->
8291b9a01d6SThomas Hellstrom 		cotables[cotable_type];
830d80efd5cSThomas Hellstrom }
831d80efd5cSThomas Hellstrom 
832d80efd5cSThomas Hellstrom /**
833d80efd5cSThomas Hellstrom  * vmw_context_binding_state -
834d80efd5cSThomas Hellstrom  * Return a pointer to a context binding state structure
835d80efd5cSThomas Hellstrom  *
836d80efd5cSThomas Hellstrom  * @ctx: The context resource
837d80efd5cSThomas Hellstrom  *
838d80efd5cSThomas Hellstrom  * Returns the current state of bindings of the given context. Note that
839d80efd5cSThomas Hellstrom  * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
840d80efd5cSThomas Hellstrom  */
841d80efd5cSThomas Hellstrom struct vmw_ctx_binding_state *
842d80efd5cSThomas Hellstrom vmw_context_binding_state(struct vmw_resource *ctx)
843d80efd5cSThomas Hellstrom {
844d80efd5cSThomas Hellstrom 	return container_of(ctx, struct vmw_user_context, res)->cbs;
845d80efd5cSThomas Hellstrom }
846fd11a3c0SSinclair Yeh 
847fd11a3c0SSinclair Yeh /**
848fd11a3c0SSinclair Yeh  * vmw_context_bind_dx_query -
849fd11a3c0SSinclair Yeh  * Sets query MOB for the context.  If @mob is NULL, then this function will
850fd11a3c0SSinclair Yeh  * remove the association between the MOB and the context.  This function
851fd11a3c0SSinclair Yeh  * assumes the binding_mutex is held.
852fd11a3c0SSinclair Yeh  *
853fd11a3c0SSinclair Yeh  * @ctx_res: The context resource
854fd11a3c0SSinclair Yeh  * @mob: a reference to the query MOB
855fd11a3c0SSinclair Yeh  *
856fd11a3c0SSinclair Yeh  * Returns -EINVAL if a MOB has already been set and does not match the one
857fd11a3c0SSinclair Yeh  * specified in the parameter.  0 otherwise.
858fd11a3c0SSinclair Yeh  */
859fd11a3c0SSinclair Yeh int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
86009881d29SZack Rusin 			      struct vmw_bo *mob)
861fd11a3c0SSinclair Yeh {
862fd11a3c0SSinclair Yeh 	struct vmw_user_context *uctx =
863fd11a3c0SSinclair Yeh 		container_of(ctx_res, struct vmw_user_context, res);
864fd11a3c0SSinclair Yeh 
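	/*
	 * A NULL mob detaches any currently bound query MOB and drops
	 * the reference taken when it was bound.
	 */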
865fd11a3c0SSinclair Yeh 	if (mob == NULL) {
866fd11a3c0SSinclair Yeh 		if (uctx->dx_query_mob) {
867fd11a3c0SSinclair Yeh 			uctx->dx_query_mob->dx_query_ctx = NULL;
868f1d34bfdSThomas Hellstrom 			vmw_bo_unreference(&uctx->dx_query_mob);
869fd11a3c0SSinclair Yeh 			uctx->dx_query_mob = NULL;
870fd11a3c0SSinclair Yeh 		}
871fd11a3c0SSinclair Yeh 
872fd11a3c0SSinclair Yeh 		return 0;
873fd11a3c0SSinclair Yeh 	}
874fd11a3c0SSinclair Yeh 
875fd11a3c0SSinclair Yeh 	/* Can only have one MOB per context for queries */
876fd11a3c0SSinclair Yeh 	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
877fd11a3c0SSinclair Yeh 		return -EINVAL;
878fd11a3c0SSinclair Yeh 
879fd11a3c0SSinclair Yeh 	mob->dx_query_ctx  = ctx_res;
880fd11a3c0SSinclair Yeh 
881fd11a3c0SSinclair Yeh 	if (!uctx->dx_query_mob)
882f1d34bfdSThomas Hellstrom 		uctx->dx_query_mob = vmw_bo_reference(mob);
883fd11a3c0SSinclair Yeh 
884fd11a3c0SSinclair Yeh 	return 0;
885fd11a3c0SSinclair Yeh }
886fd11a3c0SSinclair Yeh 
887fd11a3c0SSinclair Yeh /**
888fd11a3c0SSinclair Yeh  * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
889fd11a3c0SSinclair Yeh  *
890fd11a3c0SSinclair Yeh  * @ctx_res: The context resource
891fd11a3c0SSinclair Yeh  */
89209881d29SZack Rusin struct vmw_bo *
893fd11a3c0SSinclair Yeh vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
894fd11a3c0SSinclair Yeh {
895fd11a3c0SSinclair Yeh 	struct vmw_user_context *uctx =
896fd11a3c0SSinclair Yeh 		container_of(ctx_res, struct vmw_user_context, res);
897fd11a3c0SSinclair Yeh 
898fd11a3c0SSinclair Yeh 	return uctx->dx_query_mob;
899fd11a3c0SSinclair Yeh }