/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

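/**
 * struct vmw_user_context - User-space visible context resource
 *
 * @base: The TTM base object providing user-space visibility.
 * @res: The embedded struct vmw_resource.
 * @cbs: Context binding state tracker.
 * @man: Manager of command-buffer managed resources belonging to the context.
 * @cotables: Cotable resources. Used by DX contexts only.
 * @cotable_lock: Protects @cotables against concurrent lookup and teardown.
 * @dx_query_mob: The buffer object holding DX query results for this
 * context, if any.
 */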
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

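/**
 * vmw_context_cotables_unref - Drop the context's cotable references
 *
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under the cotable spinlock before dropping
 * the reference, so that concurrent lookups see either a valid resource
 * or NULL.
 */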
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;

	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

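/**
 * vmw_hw_context_destroy - Destroy the device context backing @res
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, this destroys the command-buffer
 * resource manager, kills all bindings and calls the resource's hardware
 * destroy callback. For legacy contexts, an SVGA_3D_CMD_CONTEXT_DESTROY
 * command is submitted to the FIFO directly.
 */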
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

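/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @dx: Whether to set up the resource as a DX context.
 * @res: Pointer to the already-allocated context resource.
 * @res_free: Destructor to call if initialization fails.
 *
 * Sets up the backup buffer size, the command-buffer resource manager,
 * the binding state tracker and, for DX contexts, the cotables.
 * On failure, @res is handed to @res_free, or to kfree() if @res_free
 * is NULL.
 */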
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (unlikely(IS_ERR(uctx->cotables[i]))) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_cotables:
	vmw_context_cotables_unref(uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

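/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: Pointer to the already-allocated context resource.
 * @res_free: Destructor to call if initialization fails.
 * @dx: Whether to set up the resource as a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices. Otherwise
 * a legacy context is defined directly in the FIFO.
 */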
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

/*
 * GB context.
 */

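/**
 * vmw_gb_context_create - Define a guest-backed context on the device
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device id for the context and submits an
 * SVGA_3D_CMD_DEFINE_GB_CONTEXT command to the FIFO.
 */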
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

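/**
 * vmw_gb_context_bind - Bind the context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer describing the already-validated backup MOB.
 */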
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}
	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

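/**
 * vmw_gb_context_unbind - Unbind the context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to have the device write back the context contents
 * to the backup MOB before unbinding.
 * @val_buf: Validation buffer describing the backup MOB.
 *
 * Scrubs all bindings, optionally issues a readback, binds the context to
 * the invalid MOB id and finally fences the backup buffer.
 */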
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

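/**
 * vmw_gb_context_destroy - Destroy the device context and release its id
 *
 * @res: Pointer to the context resource.
 */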
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

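/**
 * vmw_dx_context_create - Define a DX context on the device
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device id for the context and submits an
 * SVGA_3D_CMD_DX_DEFINE_CONTEXT command to the FIFO.
 */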
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

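/**
 * vmw_dx_context_bind - Bind the DX context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer describing the already-validated backup MOB.
 */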
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and cotables from a context
 *
 * @ctx: Pointer to the context resource.
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * Cotables must be unbound before their context, but unbinding requires
 * the backup buffer to be reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so all bindings are scrubbed
 * first, to avoid having to scrub them later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

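/**
 * vmw_dx_context_unbind - Unbind the DX context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to have the device write back the context contents,
 * including cotables and pending query results, before unbinding.
 * @val_buf: Validation buffer describing the backup MOB.
 */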
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

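/**
 * vmw_dx_context_destroy - Destroy the device context and release its id
 *
 * @res: Pointer to the context resource.
 */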
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * User-space context management:
 */

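/**
 * vmw_user_context_base_to_res - Return the resource embedded in a base object
 *
 * @base: Pointer to the TTM base object.
 */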
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

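/**
 * vmw_user_context_free - Free all user context data
 *
 * @res: Pointer to the embedded context resource.
 *
 * Frees the binding state, drops the DX query MOB binding if present,
 * frees the user context itself and finally releases the accounted
 * memory.
 */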
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release the base object's resource reference
 *
 * @p_base: Pointer to the base object pointer, which is cleared on return.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

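/**
 * vmw_context_destroy_ioctl - Ioctl to destroy a user context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: Identifies the calling file.
 *
 * Drops the file's reference on the base object, which in turn drops the
 * base object's reference on the resource.
 */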
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

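/**
 * vmw_context_define - Create a new context resource and base object
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Accounts for the kernel memory the context will consume, allocates and
 * initializes the resource and hooks it up to a new TTM base object,
 * returning the user-space handle in the ioctl argument.
 */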
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!dev_priv->has_dx && dx) {
		DRM_ERROR("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

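/**
 * vmw_context_define_ioctl - Ioctl to create a legacy context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: Identifies the calling file.
 */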
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

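/**
 * vmw_extended_context_define_ioctl - Ioctl to create a legacy or DX context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a union drm_vmw_extended_context_arg.
 * @file_priv: Identifies the calling file.
 */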
int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

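/**
 * vmw_context_res_man - Return the context's command-buffer resource manager
 *
 * @ctx: The context resource
 */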
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

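/**
 * vmw_context_cotable - Return a reference to one of the context's cotables
 *
 * @ctx: The context resource
 * @cotable_type: Which cotable to return
 *
 * Returns a referenced cotable resource, or an ERR_PTR on an illegal
 * cotable type.
 */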
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return vmw_resource_reference
		(container_of(ctx, struct vmw_user_context, res)->
		 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state - Return a pointer to a context binding state
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query - Set the DX query MOB for a context
 *
 * @ctx_res: The context resource
 * @mob: A reference to the query MOB
 *
 * Sets the query MOB for the context. If @mob is NULL, the association
 * between the MOB and the context is removed. This function assumes the
 * binding_mutex is held.
 *
 * Returns -EINVAL if a MOB has already been set that does not match the
 * one specified in the parameter, 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_dma_buffer *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_dmabuf_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries. */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_dmabuf_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Return a non-counted reference to the DX query MOB
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}