1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 #include <linux/sync_file.h>
28 
29 #include "vmwgfx_drv.h"
30 #include "vmwgfx_reg.h"
31 #include <drm/ttm/ttm_bo_api.h>
32 #include <drm/ttm/ttm_placement.h>
33 #include "vmwgfx_so.h"
34 #include "vmwgfx_binding.h"
35 #include "vmwgfx_mksstat.h"
36 
37 #define VMW_RES_HT_ORDER 12
38 
39 /*
40  * Helper macro to get dx_ctx_node if available otherwise print an error
41  * message. This is for use in command verifier function where if dx_ctx_node
42  * is not set then command is invalid.
43  */
44 #define VMW_GET_CTX_NODE(__sw_context)                                        \
45 ({                                                                            \
46 	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
47 		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
48 		__sw_context->dx_ctx_node;                                    \
49 	});                                                                   \
50 })
51 
52 #define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
53 	struct {                                                              \
54 		SVGA3dCmdHeader header;                                       \
55 		__type body;                                                  \
56 	} __var
57 
58 /**
59  * struct vmw_relocation - Buffer object relocation
60  *
61  * @head: List head for the command submission context's relocation list
62  * @vbo: Non ref-counted pointer to buffer object
63  * @mob_loc: Pointer to location for mob id to be modified
64  * @location: Pointer to location for guest pointer to be modified
65  */
66 struct vmw_relocation {
67 	struct list_head head;
68 	struct vmw_buffer_object *vbo;
69 	union {
70 		SVGAMobId *mob_loc;
71 		SVGAGuestPtr *location;
72 	};
73 };
74 
75 /**
76  * enum vmw_resource_relocation_type - Relocation type for resources
77  *
78  * @vmw_res_rel_normal: Traditional relocation. The resource id in the
79  * command stream is replaced with the actual id after validation.
80  * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
81  * with a NOP.
82  * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
83  * validation is -1, the command is replaced with a NOP. Otherwise no action.
84  * @vmw_res_rel_max: Last value in the enum - used for error checking
85 */
86 enum vmw_resource_relocation_type {
87 	vmw_res_rel_normal,
88 	vmw_res_rel_nop,
89 	vmw_res_rel_cond_nop,
90 	vmw_res_rel_max
91 };
92 
93 /**
94  * struct vmw_resource_relocation - Relocation info for resources
95  *
96  * @head: List head for the software context's relocation list.
97  * @res: Non-ref-counted pointer to the resource.
98  * @offset: Offset of single byte entries into the command buffer where the id
99  * that needs fixup is located.
100  * @rel_type: Type of relocation.
101  */
102 struct vmw_resource_relocation {
103 	struct list_head head;
104 	const struct vmw_resource *res;
105 	u32 offset:29;
106 	enum vmw_resource_relocation_type rel_type:3;
107 };
108 
109 /**
110  * struct vmw_ctx_validation_info - Extra validation metadata for contexts
111  *
112  * @head: List head of context list
113  * @ctx: The context resource
114  * @cur: The context's persistent binding state
115  * @staged: The binding state changes of this command buffer
116  */
117 struct vmw_ctx_validation_info {
118 	struct list_head head;
119 	struct vmw_resource *ctx;
120 	struct vmw_ctx_binding_state *cur;
121 	struct vmw_ctx_binding_state *staged;
122 };
123 
124 /**
125  * struct vmw_cmd_entry - Describe a command for the verifier
126  *
127  * @func: Call-back to handle the command.
128  * @user_allow: Whether allowed from the execbuf ioctl.
129  * @gb_disable: Whether disabled if guest-backed objects are available.
130  * @gb_enable: Whether enabled iff guest-backed objects are available.
131  * @cmd_name: Name of the command.
132  */
133 struct vmw_cmd_entry {
134 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
135 		     SVGA3dCmdHeader *);
136 	bool user_allow;
137 	bool gb_disable;
138 	bool gb_enable;
139 	const char *cmd_name;
140 };
141 
142 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
143 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
144 				       (_gb_disable), (_gb_enable), #_cmd}
145 
146 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
147 					struct vmw_sw_context *sw_context,
148 					struct vmw_resource *ctx);
149 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
150 				 struct vmw_sw_context *sw_context,
151 				 SVGAMobId *id,
152 				 struct vmw_buffer_object **vmw_bo_p);
153 /**
154  * vmw_ptr_diff - Compute the offset from a to b in bytes
155  *
156  * @a: A starting pointer.
157  * @b: A pointer offset in the same address space.
158  *
159  * Returns: The offset in bytes between the two pointers.
160  */
161 static size_t vmw_ptr_diff(void *a, void *b)
162 {
163 	return (unsigned long) b - (unsigned long) a;
164 }
165 
166 /**
167  * vmw_execbuf_bindings_commit - Commit modified binding state
168  *
169  * @sw_context: The command submission context
170  * @backoff: Whether this is part of the error path and binding state changes
171  * should be ignored
172  */
173 static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
174 					bool backoff)
175 {
176 	struct vmw_ctx_validation_info *entry;
177 
178 	list_for_each_entry(entry, &sw_context->ctx_list, head) {
179 		if (!backoff)
180 			vmw_binding_state_commit(entry->cur, entry->staged);
181 
182 		if (entry->staged != sw_context->staged_bindings)
183 			vmw_binding_state_free(entry->staged);
184 		else
185 			sw_context->staged_bindings_inuse = false;
186 	}
187 
188 	/* List entries are freed with the validation context */
189 	INIT_LIST_HEAD(&sw_context->ctx_list);
190 }
191 
192 /**
193  * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
194  *
195  * @sw_context: The command submission context
196  */
197 static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
198 {
199 	if (sw_context->dx_query_mob)
200 		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
201 					  sw_context->dx_query_mob);
202 }
203 
204 /**
205  * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
206  * the validate list.
207  *
208  * @dev_priv: Pointer to the device private:
209  * @sw_context: The command submission context
210  * @res: Pointer to the resource
211  * @node: The validation node holding the context resource metadata
212  */
213 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
214 				   struct vmw_sw_context *sw_context,
215 				   struct vmw_resource *res,
216 				   struct vmw_ctx_validation_info *node)
217 {
218 	int ret;
219 
220 	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
221 	if (unlikely(ret != 0))
222 		goto out_err;
223 
224 	if (!sw_context->staged_bindings) {
225 		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
226 		if (IS_ERR(sw_context->staged_bindings)) {
227 			ret = PTR_ERR(sw_context->staged_bindings);
228 			sw_context->staged_bindings = NULL;
229 			goto out_err;
230 		}
231 	}
232 
233 	if (sw_context->staged_bindings_inuse) {
234 		node->staged = vmw_binding_state_alloc(dev_priv);
235 		if (IS_ERR(node->staged)) {
236 			ret = PTR_ERR(node->staged);
237 			node->staged = NULL;
238 			goto out_err;
239 		}
240 	} else {
241 		node->staged = sw_context->staged_bindings;
242 		sw_context->staged_bindings_inuse = true;
243 	}
244 
245 	node->ctx = res;
246 	node->cur = vmw_context_binding_state(res);
247 	list_add_tail(&node->head, &sw_context->ctx_list);
248 
249 	return 0;
250 
251 out_err:
252 	return ret;
253 }
254 
255 /**
256  * vmw_execbuf_res_size - calculate extra size fore the resource validation node
257  *
258  * @dev_priv: Pointer to the device private struct.
259  * @res_type: The resource type.
260  *
261  * Guest-backed contexts and DX contexts require extra size to store execbuf
262  * private information in the validation node. Typically the binding manager
263  * associated data structures.
264  *
265  * Returns: The extra size requirement based on resource type.
266  */
267 static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
268 					 enum vmw_res_type res_type)
269 {
270 	return (res_type == vmw_res_dx_context ||
271 		(res_type == vmw_res_context && dev_priv->has_mob)) ?
272 		sizeof(struct vmw_ctx_validation_info) : 0;
273 }
274 
275 /**
276  * vmw_execbuf_rcache_update - Update a resource-node cache entry
277  *
278  * @rcache: Pointer to the entry to update.
279  * @res: Pointer to the resource.
280  * @private: Pointer to the execbuf-private space in the resource validation
281  * node.
282  */
283 static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
284 				      struct vmw_resource *res,
285 				      void *private)
286 {
287 	rcache->res = res;
288 	rcache->private = private;
289 	rcache->valid = 1;
290 	rcache->valid_handle = 0;
291 }
292 
293 /**
294  * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
295  * rcu-protected pointer to the validation list.
296  *
297  * @sw_context: Pointer to the software context.
298  * @res: Unreferenced rcu-protected pointer to the resource.
299  * @dirty: Whether to change dirty status.
300  *
301  * Returns: 0 on success. Negative error code on failure. Typical error codes
302  * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
303  */
304 static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
305 					 struct vmw_resource *res,
306 					 u32 dirty)
307 {
308 	struct vmw_private *dev_priv = res->dev_priv;
309 	int ret;
310 	enum vmw_res_type res_type = vmw_res_type(res);
311 	struct vmw_res_cache_entry *rcache;
312 	struct vmw_ctx_validation_info *ctx_info;
313 	bool first_usage;
314 	unsigned int priv_size;
315 
316 	rcache = &sw_context->res_cache[res_type];
317 	if (likely(rcache->valid && rcache->res == res)) {
318 		if (dirty)
319 			vmw_validation_res_set_dirty(sw_context->ctx,
320 						     rcache->private, dirty);
321 		vmw_user_resource_noref_release();
322 		return 0;
323 	}
324 
325 	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
326 	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
327 					  dirty, (void **)&ctx_info,
328 					  &first_usage);
329 	vmw_user_resource_noref_release();
330 	if (ret)
331 		return ret;
332 
333 	if (priv_size && first_usage) {
334 		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
335 					      ctx_info);
336 		if (ret) {
337 			VMW_DEBUG_USER("Failed first usage context setup.\n");
338 			return ret;
339 		}
340 	}
341 
342 	vmw_execbuf_rcache_update(rcache, res, ctx_info);
343 	return 0;
344 }
345 
346 /**
347  * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
348  * validation list if it's not already on it
349  *
350  * @sw_context: Pointer to the software context.
351  * @res: Pointer to the resource.
352  * @dirty: Whether to change dirty status.
353  *
354  * Returns: Zero on success. Negative error code on failure.
355  */
356 static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
357 					 struct vmw_resource *res,
358 					 u32 dirty)
359 {
360 	struct vmw_res_cache_entry *rcache;
361 	enum vmw_res_type res_type = vmw_res_type(res);
362 	void *ptr;
363 	int ret;
364 
365 	rcache = &sw_context->res_cache[res_type];
366 	if (likely(rcache->valid && rcache->res == res)) {
367 		if (dirty)
368 			vmw_validation_res_set_dirty(sw_context->ctx,
369 						     rcache->private, dirty);
370 		return 0;
371 	}
372 
373 	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
374 					  &ptr, NULL);
375 	if (ret)
376 		return ret;
377 
378 	vmw_execbuf_rcache_update(rcache, res, ptr);
379 
380 	return 0;
381 }
382 
383 /**
384  * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
385  * validation list
386  *
387  * @sw_context: The software context holding the validation list.
388  * @view: Pointer to the view resource.
389  *
390  * Returns 0 if success, negative error code otherwise.
391  */
392 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
393 				struct vmw_resource *view)
394 {
395 	int ret;
396 
397 	/*
398 	 * First add the resource the view is pointing to, otherwise it may be
399 	 * swapped out when the view is validated.
400 	 */
401 	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
402 					    vmw_view_dirtying(view));
403 	if (ret)
404 		return ret;
405 
406 	return vmw_execbuf_res_noctx_val_add(sw_context, view,
407 					     VMW_RES_DIRTY_NONE);
408 }
409 
410 /**
411  * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
412  * to to the validation list.
413  *
414  * @sw_context: The software context holding the validation list.
415  * @view_type: The view type to look up.
416  * @id: view id of the view.
417  *
418  * The view is represented by a view id and the DX context it's created on, or
419  * scheduled for creation on. If there is no DX context set, the function will
420  * return an -EINVAL error pointer.
421  *
422  * Returns: Unreferenced pointer to the resource on success, negative error
423  * pointer on failure.
424  */
425 static struct vmw_resource *
426 vmw_view_id_val_add(struct vmw_sw_context *sw_context,
427 		    enum vmw_view_type view_type, u32 id)
428 {
429 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
430 	struct vmw_resource *view;
431 	int ret;
432 
433 	if (!ctx_node)
434 		return ERR_PTR(-EINVAL);
435 
436 	view = vmw_view_lookup(sw_context->man, view_type, id);
437 	if (IS_ERR(view))
438 		return view;
439 
440 	ret = vmw_view_res_val_add(sw_context, view);
441 	if (ret)
442 		return ERR_PTR(ret);
443 
444 	return view;
445 }
446 
447 /**
448  * vmw_resource_context_res_add - Put resources previously bound to a context on
449  * the validation list
450  *
451  * @dev_priv: Pointer to a device private structure
452  * @sw_context: Pointer to a software context used for this command submission
453  * @ctx: Pointer to the context resource
454  *
455  * This function puts all resources that were previously bound to @ctx on the
456  * resource validation list. This is part of the context state reemission
457  */
458 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
459 					struct vmw_sw_context *sw_context,
460 					struct vmw_resource *ctx)
461 {
462 	struct list_head *binding_list;
463 	struct vmw_ctx_bindinfo *entry;
464 	int ret = 0;
465 	struct vmw_resource *res;
466 	u32 i;
467 	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
468 		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
469 
470 	/* Add all cotables to the validation list. */
471 	if (has_sm4_context(dev_priv) &&
472 	    vmw_res_type(ctx) == vmw_res_dx_context) {
473 		for (i = 0; i < cotable_max; ++i) {
474 			res = vmw_context_cotable(ctx, i);
475 			if (IS_ERR(res))
476 				continue;
477 
478 			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
479 							    VMW_RES_DIRTY_SET);
480 			if (unlikely(ret != 0))
481 				return ret;
482 		}
483 	}
484 
485 	/* Add all resources bound to the context to the validation list */
486 	mutex_lock(&dev_priv->binding_mutex);
487 	binding_list = vmw_context_binding_list(ctx);
488 
489 	list_for_each_entry(entry, binding_list, ctx_list) {
490 		if (vmw_res_type(entry->res) == vmw_res_view)
491 			ret = vmw_view_res_val_add(sw_context, entry->res);
492 		else
493 			ret = vmw_execbuf_res_noctx_val_add
494 				(sw_context, entry->res,
495 				 vmw_binding_dirtying(entry->bt));
496 		if (unlikely(ret != 0))
497 			break;
498 	}
499 
500 	if (has_sm4_context(dev_priv) &&
501 	    vmw_res_type(ctx) == vmw_res_dx_context) {
502 		struct vmw_buffer_object *dx_query_mob;
503 
504 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
505 		if (dx_query_mob)
506 			ret = vmw_validation_add_bo(sw_context->ctx,
507 						    dx_query_mob, true, false);
508 	}
509 
510 	mutex_unlock(&dev_priv->binding_mutex);
511 	return ret;
512 }
513 
514 /**
515  * vmw_resource_relocation_add - Add a relocation to the relocation list
516  *
517  * @sw_context: Pointer to the software context.
518  * @res: The resource.
519  * @offset: Offset into the command buffer currently being parsed where the id
520  * that needs fixup is located. Granularity is one byte.
521  * @rel_type: Relocation type.
522  */
523 static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
524 				       const struct vmw_resource *res,
525 				       unsigned long offset,
526 				       enum vmw_resource_relocation_type
527 				       rel_type)
528 {
529 	struct vmw_resource_relocation *rel;
530 
531 	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
532 	if (unlikely(!rel)) {
533 		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
534 		return -ENOMEM;
535 	}
536 
537 	rel->res = res;
538 	rel->offset = offset;
539 	rel->rel_type = rel_type;
540 	list_add_tail(&rel->head, &sw_context->res_relocations);
541 
542 	return 0;
543 }
544 
545 /**
546  * vmw_resource_relocations_free - Free all relocations on a list
547  *
548  * @list: Pointer to the head of the relocation list
549  */
550 static void vmw_resource_relocations_free(struct list_head *list)
551 {
552 	/* Memory is validation context memory, so no need to free it */
553 	INIT_LIST_HEAD(list);
554 }
555 
556 /**
557  * vmw_resource_relocations_apply - Apply all relocations on a list
558  *
559  * @cb: Pointer to the start of the command buffer bein patch. This need not be
560  * the same buffer as the one being parsed when the relocation list was built,
561  * but the contents must be the same modulo the resource ids.
562  * @list: Pointer to the head of the relocation list.
563  */
564 static void vmw_resource_relocations_apply(uint32_t *cb,
565 					   struct list_head *list)
566 {
567 	struct vmw_resource_relocation *rel;
568 
569 	/* Validate the struct vmw_resource_relocation member size */
570 	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
571 	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
572 
573 	list_for_each_entry(rel, list, head) {
574 		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
575 		switch (rel->rel_type) {
576 		case vmw_res_rel_normal:
577 			*addr = rel->res->id;
578 			break;
579 		case vmw_res_rel_nop:
580 			*addr = SVGA_3D_CMD_NOP;
581 			break;
582 		default:
583 			if (rel->res->id == -1)
584 				*addr = SVGA_3D_CMD_NOP;
585 			break;
586 		}
587 	}
588 }
589 
590 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
591 			   struct vmw_sw_context *sw_context,
592 			   SVGA3dCmdHeader *header)
593 {
594 	return -EINVAL;
595 }
596 
597 static int vmw_cmd_ok(struct vmw_private *dev_priv,
598 		      struct vmw_sw_context *sw_context,
599 		      SVGA3dCmdHeader *header)
600 {
601 	return 0;
602 }
603 
604 /**
605  * vmw_resources_reserve - Reserve all resources on the sw_context's resource
606  * list.
607  *
608  * @sw_context: Pointer to the software context.
609  *
610  * Note that since vmware's command submission currently is protected by the
611  * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
612  * only a single thread at once will attempt this.
613  */
614 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
615 {
616 	int ret;
617 
618 	ret = vmw_validation_res_reserve(sw_context->ctx, true);
619 	if (ret)
620 		return ret;
621 
622 	if (sw_context->dx_query_mob) {
623 		struct vmw_buffer_object *expected_dx_query_mob;
624 
625 		expected_dx_query_mob =
626 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
627 		if (expected_dx_query_mob &&
628 		    expected_dx_query_mob != sw_context->dx_query_mob) {
629 			ret = -EINVAL;
630 		}
631 	}
632 
633 	return ret;
634 }
635 
636 /**
637  * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
638  * resource validate list unless it's already there.
639  *
640  * @dev_priv: Pointer to a device private structure.
641  * @sw_context: Pointer to the software context.
642  * @res_type: Resource type.
643  * @dirty: Whether to change dirty status.
644  * @converter: User-space visisble type specific information.
645  * @id_loc: Pointer to the location in the command buffer currently being parsed
646  * from where the user-space resource id handle is located.
647  * @p_res: Pointer to pointer to resource validalidation node. Populated on
648  * exit.
649  */
650 static int
651 vmw_cmd_res_check(struct vmw_private *dev_priv,
652 		  struct vmw_sw_context *sw_context,
653 		  enum vmw_res_type res_type,
654 		  u32 dirty,
655 		  const struct vmw_user_resource_conv *converter,
656 		  uint32_t *id_loc,
657 		  struct vmw_resource **p_res)
658 {
659 	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
660 	struct vmw_resource *res;
661 	int ret;
662 
663 	if (p_res)
664 		*p_res = NULL;
665 
666 	if (*id_loc == SVGA3D_INVALID_ID) {
667 		if (res_type == vmw_res_context) {
668 			VMW_DEBUG_USER("Illegal context invalid id.\n");
669 			return -EINVAL;
670 		}
671 		return 0;
672 	}
673 
674 	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
675 		res = rcache->res;
676 		if (dirty)
677 			vmw_validation_res_set_dirty(sw_context->ctx,
678 						     rcache->private, dirty);
679 	} else {
680 		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
681 
682 		ret = vmw_validation_preload_res(sw_context->ctx, size);
683 		if (ret)
684 			return ret;
685 
686 		res = vmw_user_resource_noref_lookup_handle
687 			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
688 		if (IS_ERR(res)) {
689 			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
690 				       (unsigned int) *id_loc);
691 			return PTR_ERR(res);
692 		}
693 
694 		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
695 		if (unlikely(ret != 0))
696 			return ret;
697 
698 		if (rcache->valid && rcache->res == res) {
699 			rcache->valid_handle = true;
700 			rcache->handle = *id_loc;
701 		}
702 	}
703 
704 	ret = vmw_resource_relocation_add(sw_context, res,
705 					  vmw_ptr_diff(sw_context->buf_start,
706 						       id_loc),
707 					  vmw_res_rel_normal);
708 	if (p_res)
709 		*p_res = res;
710 
711 	return 0;
712 }
713 
714 /**
715  * vmw_rebind_all_dx_query - Rebind DX query associated with the context
716  *
717  * @ctx_res: context the query belongs to
718  *
719  * This function assumes binding_mutex is held.
720  */
721 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
722 {
723 	struct vmw_private *dev_priv = ctx_res->dev_priv;
724 	struct vmw_buffer_object *dx_query_mob;
725 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
726 
727 	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
728 
729 	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
730 		return 0;
731 
732 	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
733 	if (cmd == NULL)
734 		return -ENOMEM;
735 
736 	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
737 	cmd->header.size = sizeof(cmd->body);
738 	cmd->body.cid = ctx_res->id;
739 	cmd->body.mobid = dx_query_mob->base.resource->start;
740 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
741 
742 	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
743 
744 	return 0;
745 }
746 
747 /**
748  * vmw_rebind_contexts - Rebind all resources previously bound to referenced
749  * contexts.
750  *
751  * @sw_context: Pointer to the software context.
752  *
753  * Rebind context binding points that have been scrubbed because of eviction.
754  */
755 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
756 {
757 	struct vmw_ctx_validation_info *val;
758 	int ret;
759 
760 	list_for_each_entry(val, &sw_context->ctx_list, head) {
761 		ret = vmw_binding_rebind_all(val->cur);
762 		if (unlikely(ret != 0)) {
763 			if (ret != -ERESTARTSYS)
764 				VMW_DEBUG_USER("Failed to rebind context.\n");
765 			return ret;
766 		}
767 
768 		ret = vmw_rebind_all_dx_query(val->ctx);
769 		if (ret != 0) {
770 			VMW_DEBUG_USER("Failed to rebind queries.\n");
771 			return ret;
772 		}
773 	}
774 
775 	return 0;
776 }
777 
778 /**
779  * vmw_view_bindings_add - Add an array of view bindings to a context binding
780  * state tracker.
781  *
782  * @sw_context: The execbuf state used for this command.
783  * @view_type: View type for the bindings.
784  * @binding_type: Binding type for the bindings.
785  * @shader_slot: The shader slot to user for the bindings.
786  * @view_ids: Array of view ids to be bound.
787  * @num_views: Number of view ids in @view_ids.
788  * @first_slot: The binding slot to be used for the first view id in @view_ids.
789  */
790 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
791 				 enum vmw_view_type view_type,
792 				 enum vmw_ctx_binding_type binding_type,
793 				 uint32 shader_slot,
794 				 uint32 view_ids[], u32 num_views,
795 				 u32 first_slot)
796 {
797 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
798 	u32 i;
799 
800 	if (!ctx_node)
801 		return -EINVAL;
802 
803 	for (i = 0; i < num_views; ++i) {
804 		struct vmw_ctx_bindinfo_view binding;
805 		struct vmw_resource *view = NULL;
806 
807 		if (view_ids[i] != SVGA3D_INVALID_ID) {
808 			view = vmw_view_id_val_add(sw_context, view_type,
809 						   view_ids[i]);
810 			if (IS_ERR(view)) {
811 				VMW_DEBUG_USER("View not found.\n");
812 				return PTR_ERR(view);
813 			}
814 		}
815 		binding.bi.ctx = ctx_node->ctx;
816 		binding.bi.res = view;
817 		binding.bi.bt = binding_type;
818 		binding.shader_slot = shader_slot;
819 		binding.slot = first_slot + i;
820 		vmw_binding_add(ctx_node->staged, &binding.bi,
821 				shader_slot, binding.slot);
822 	}
823 
824 	return 0;
825 }
826 
827 /**
828  * vmw_cmd_cid_check - Check a command header for valid context information.
829  *
830  * @dev_priv: Pointer to a device private structure.
831  * @sw_context: Pointer to the software context.
832  * @header: A command header with an embedded user-space context handle.
833  *
834  * Convenience function: Call vmw_cmd_res_check with the user-space context
835  * handle embedded in @header.
836  */
837 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
838 			     struct vmw_sw_context *sw_context,
839 			     SVGA3dCmdHeader *header)
840 {
841 	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
842 		container_of(header, typeof(*cmd), header);
843 
844 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
845 				 VMW_RES_DIRTY_SET, user_context_converter,
846 				 &cmd->body, NULL);
847 }
848 
849 /**
850  * vmw_execbuf_info_from_res - Get the private validation metadata for a
851  * recently validated resource
852  *
853  * @sw_context: Pointer to the command submission context
854  * @res: The resource
855  *
856  * The resource pointed to by @res needs to be present in the command submission
857  * context's resource cache and hence the last resource of that type to be
858  * processed by the validation code.
859  *
860  * Return: a pointer to the private metadata of the resource, or NULL if it
861  * wasn't found
862  */
863 static struct vmw_ctx_validation_info *
864 vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
865 			  struct vmw_resource *res)
866 {
867 	struct vmw_res_cache_entry *rcache =
868 		&sw_context->res_cache[vmw_res_type(res)];
869 
870 	if (rcache->valid && rcache->res == res)
871 		return rcache->private;
872 
873 	WARN_ON_ONCE(true);
874 	return NULL;
875 }
876 
877 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
878 					   struct vmw_sw_context *sw_context,
879 					   SVGA3dCmdHeader *header)
880 {
881 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
882 	struct vmw_resource *ctx;
883 	struct vmw_resource *res;
884 	int ret;
885 
886 	cmd = container_of(header, typeof(*cmd), header);
887 
888 	if (cmd->body.type >= SVGA3D_RT_MAX) {
889 		VMW_DEBUG_USER("Illegal render target type %u.\n",
890 			       (unsigned int) cmd->body.type);
891 		return -EINVAL;
892 	}
893 
894 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
895 				VMW_RES_DIRTY_SET, user_context_converter,
896 				&cmd->body.cid, &ctx);
897 	if (unlikely(ret != 0))
898 		return ret;
899 
900 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
901 				VMW_RES_DIRTY_SET, user_surface_converter,
902 				&cmd->body.target.sid, &res);
903 	if (unlikely(ret))
904 		return ret;
905 
906 	if (dev_priv->has_mob) {
907 		struct vmw_ctx_bindinfo_view binding;
908 		struct vmw_ctx_validation_info *node;
909 
910 		node = vmw_execbuf_info_from_res(sw_context, ctx);
911 		if (!node)
912 			return -EINVAL;
913 
914 		binding.bi.ctx = ctx;
915 		binding.bi.res = res;
916 		binding.bi.bt = vmw_ctx_binding_rt;
917 		binding.slot = cmd->body.type;
918 		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
919 	}
920 
921 	return 0;
922 }
923 
924 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
925 				      struct vmw_sw_context *sw_context,
926 				      SVGA3dCmdHeader *header)
927 {
928 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
929 	int ret;
930 
931 	cmd = container_of(header, typeof(*cmd), header);
932 
933 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
934 				VMW_RES_DIRTY_NONE, user_surface_converter,
935 				&cmd->body.src.sid, NULL);
936 	if (ret)
937 		return ret;
938 
939 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
940 				 VMW_RES_DIRTY_SET, user_surface_converter,
941 				 &cmd->body.dest.sid, NULL);
942 }
943 
944 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
945 				     struct vmw_sw_context *sw_context,
946 				     SVGA3dCmdHeader *header)
947 {
948 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
949 	int ret;
950 
951 	cmd = container_of(header, typeof(*cmd), header);
952 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
953 				VMW_RES_DIRTY_NONE, user_surface_converter,
954 				&cmd->body.src, NULL);
955 	if (ret != 0)
956 		return ret;
957 
958 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
959 				 VMW_RES_DIRTY_SET, user_surface_converter,
960 				 &cmd->body.dest, NULL);
961 }
962 
963 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
964 				   struct vmw_sw_context *sw_context,
965 				   SVGA3dCmdHeader *header)
966 {
967 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
968 	int ret;
969 
970 	cmd = container_of(header, typeof(*cmd), header);
971 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
972 				VMW_RES_DIRTY_NONE, user_surface_converter,
973 				&cmd->body.srcSid, NULL);
974 	if (ret != 0)
975 		return ret;
976 
977 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
978 				 VMW_RES_DIRTY_SET, user_surface_converter,
979 				 &cmd->body.dstSid, NULL);
980 }
981 
982 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
983 				     struct vmw_sw_context *sw_context,
984 				     SVGA3dCmdHeader *header)
985 {
986 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
987 	int ret;
988 
989 	cmd = container_of(header, typeof(*cmd), header);
990 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
991 				VMW_RES_DIRTY_NONE, user_surface_converter,
992 				&cmd->body.src.sid, NULL);
993 	if (unlikely(ret != 0))
994 		return ret;
995 
996 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
997 				 VMW_RES_DIRTY_SET, user_surface_converter,
998 				 &cmd->body.dest.sid, NULL);
999 }
1000 
1001 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1002 					 struct vmw_sw_context *sw_context,
1003 					 SVGA3dCmdHeader *header)
1004 {
1005 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
1006 		container_of(header, typeof(*cmd), header);
1007 
1008 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1009 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1010 				 &cmd->body.srcImage.sid, NULL);
1011 }
1012 
1013 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1014 				 struct vmw_sw_context *sw_context,
1015 				 SVGA3dCmdHeader *header)
1016 {
1017 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1018 		container_of(header, typeof(*cmd), header);
1019 
1020 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1021 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1022 				 &cmd->body.sid, NULL);
1023 }
1024 
1025 /**
1026  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1027  *
1028  * @dev_priv: The device private structure.
1029  * @new_query_bo: The new buffer holding query results.
1030  * @sw_context: The software context used for this command submission.
1031  *
1032  * This function checks whether @new_query_bo is suitable for holding query
1033  * results, and if another buffer currently is pinned for query results. If so,
1034  * the function prepares the state of @sw_context for switching pinned buffers
1035  * after successful submission of the current command batch.
1036  */
1037 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1038 				       struct vmw_buffer_object *new_query_bo,
1039 				       struct vmw_sw_context *sw_context)
1040 {
1041 	struct vmw_res_cache_entry *ctx_entry =
1042 		&sw_context->res_cache[vmw_res_context];
1043 	int ret;
1044 
1045 	BUG_ON(!ctx_entry->valid);
1046 	sw_context->last_query_ctx = ctx_entry->res;
1047 
1048 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1049 
1050 		if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
1051 			VMW_DEBUG_USER("Query buffer too large.\n");
1052 			return -EINVAL;
1053 		}
1054 
1055 		if (unlikely(sw_context->cur_query_bo != NULL)) {
1056 			sw_context->needs_post_query_barrier = true;
1057 			ret = vmw_validation_add_bo(sw_context->ctx,
1058 						    sw_context->cur_query_bo,
1059 						    dev_priv->has_mob, false);
1060 			if (unlikely(ret != 0))
1061 				return ret;
1062 		}
1063 		sw_context->cur_query_bo = new_query_bo;
1064 
1065 		ret = vmw_validation_add_bo(sw_context->ctx,
1066 					    dev_priv->dummy_query_bo,
1067 					    dev_priv->has_mob, false);
1068 		if (unlikely(ret != 0))
1069 			return ret;
1070 	}
1071 
1072 	return 0;
1073 }
1074 
1075 /**
1076  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1077  *
1078  * @dev_priv: The device private structure.
1079  * @sw_context: The software context used for this command submission batch.
1080  *
1081  * This function will check if we're switching query buffers, and will then,
1082  * issue a dummy occlusion query wait used as a query barrier. When the fence
1083  * object following that query wait has signaled, we are sure that all preceding
1084  * queries have finished, and the old query buffer can be unpinned. However,
1085  * since both the new query buffer and the old one are fenced with that fence,
1086  * we can do an asynchronus unpin now, and be sure that the old query buffer
1087  * won't be moved until the fence has signaled.
1088  *
1089  * As mentioned above, both the new - and old query buffers need to be fenced
1090  * using a sequence emitted *after* calling this function.
1091  */
1092 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1093 				     struct vmw_sw_context *sw_context)
1094 {
1095 	/*
1096 	 * The validate list should still hold references to all
1097 	 * contexts here.
1098 	 */
1099 	if (sw_context->needs_post_query_barrier) {
1100 		struct vmw_res_cache_entry *ctx_entry =
1101 			&sw_context->res_cache[vmw_res_context];
1102 		struct vmw_resource *ctx;
1103 		int ret;
1104 
1105 		BUG_ON(!ctx_entry->valid);
1106 		ctx = ctx_entry->res;
1107 
1108 		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
1109 
1110 		if (unlikely(ret != 0))
1111 			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1112 	}
1113 
1114 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1115 		if (dev_priv->pinned_bo) {
1116 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1117 			vmw_bo_unreference(&dev_priv->pinned_bo);
1118 		}
1119 
1120 		if (!sw_context->needs_post_query_barrier) {
1121 			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1122 
1123 			/*
1124 			 * We pin also the dummy_query_bo buffer so that we
1125 			 * don't need to validate it when emitting dummy queries
1126 			 * in context destroy paths.
1127 			 */
1128 			if (!dev_priv->dummy_query_bo_pinned) {
1129 				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1130 						    true);
1131 				dev_priv->dummy_query_bo_pinned = true;
1132 			}
1133 
1134 			BUG_ON(sw_context->last_query_ctx == NULL);
1135 			dev_priv->query_cid = sw_context->last_query_ctx->id;
1136 			dev_priv->query_cid_valid = true;
1137 			dev_priv->pinned_bo =
1138 				vmw_bo_reference(sw_context->cur_query_bo);
1139 		}
1140 	}
1141 }
1142 
1143 /**
1144  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1145  * to a MOB id.
1146  *
1147  * @dev_priv: Pointer to a device private structure.
1148  * @sw_context: The software context used for this command batch validation.
1149  * @id: Pointer to the user-space handle to be translated.
1150  * @vmw_bo_p: Points to a location that, on successful return will carry a
1151  * non-reference-counted pointer to the buffer object identified by the
1152  * user-space handle in @id.
1153  *
1154  * This function saves information needed to translate a user-space buffer
1155  * handle to a MOB id. The translation does not take place immediately, but
1156  * during a call to vmw_apply_relocations().
1157  *
1158  * This function builds a relocation list and a list of buffers to validate. The
1159  * former needs to be freed using either vmw_apply_relocations() or
1160  * vmw_free_relocations(). The latter needs to be freed using
1161  * vmw_clear_validations.
1162  */
1163 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1164 				 struct vmw_sw_context *sw_context,
1165 				 SVGAMobId *id,
1166 				 struct vmw_buffer_object **vmw_bo_p)
1167 {
1168 	struct vmw_buffer_object *vmw_bo;
1169 	uint32_t handle = *id;
1170 	struct vmw_relocation *reloc;
1171 	int ret;
1172 
1173 	vmw_validation_preload_bo(sw_context->ctx);
1174 	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1175 	if (IS_ERR(vmw_bo)) {
1176 		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1177 		return PTR_ERR(vmw_bo);
1178 	}
1179 
1180 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1181 	vmw_user_bo_noref_release();
1182 	if (unlikely(ret != 0))
1183 		return ret;
1184 
1185 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1186 	if (!reloc)
1187 		return -ENOMEM;
1188 
1189 	reloc->mob_loc = id;
1190 	reloc->vbo = vmw_bo;
1191 
1192 	*vmw_bo_p = vmw_bo;
1193 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1194 
1195 	return 0;
1196 }
1197 
1198 /**
1199  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1200  * to a valid SVGAGuestPtr
1201  *
1202  * @dev_priv: Pointer to a device private structure.
1203  * @sw_context: The software context used for this command batch validation.
1204  * @ptr: Pointer to the user-space handle to be translated.
1205  * @vmw_bo_p: Points to a location that, on successful return will carry a
1206  * non-reference-counted pointer to the DMA buffer identified by the user-space
1207  * handle in @id.
1208  *
1209  * This function saves information needed to translate a user-space buffer
1210  * handle to a valid SVGAGuestPtr. The translation does not take place
1211  * immediately, but during a call to vmw_apply_relocations().
1212  *
1213  * This function builds a relocation list and a list of buffers to validate.
1214  * The former needs to be freed using either vmw_apply_relocations() or
1215  * vmw_free_relocations(). The latter needs to be freed using
1216  * vmw_clear_validations.
1217  */
1218 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1219 				   struct vmw_sw_context *sw_context,
1220 				   SVGAGuestPtr *ptr,
1221 				   struct vmw_buffer_object **vmw_bo_p)
1222 {
1223 	struct vmw_buffer_object *vmw_bo;
1224 	uint32_t handle = ptr->gmrId;
1225 	struct vmw_relocation *reloc;
1226 	int ret;
1227 
1228 	vmw_validation_preload_bo(sw_context->ctx);
1229 	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1230 	if (IS_ERR(vmw_bo)) {
1231 		VMW_DEBUG_USER("Could not find or use GMR region.\n");
1232 		return PTR_ERR(vmw_bo);
1233 	}
1234 
1235 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1236 	vmw_user_bo_noref_release();
1237 	if (unlikely(ret != 0))
1238 		return ret;
1239 
1240 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1241 	if (!reloc)
1242 		return -ENOMEM;
1243 
1244 	reloc->location = ptr;
1245 	reloc->vbo = vmw_bo;
1246 	*vmw_bo_p = vmw_bo;
1247 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1248 
1249 	return 0;
1250 }
1251 
1252 /**
1253  * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1254  *
1255  * @dev_priv: Pointer to a device private struct.
1256  * @sw_context: The software context used for this command submission.
1257  * @header: Pointer to the command header in the command stream.
1258  *
1259  * This function adds the new query into the query COTABLE
1260  */
1261 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1262 				   struct vmw_sw_context *sw_context,
1263 				   SVGA3dCmdHeader *header)
1264 {
1265 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1266 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1267 	struct vmw_resource *cotable_res;
1268 	int ret;
1269 
1270 	if (!ctx_node)
1271 		return -EINVAL;
1272 
1273 	cmd = container_of(header, typeof(*cmd), header);
1274 
1275 	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1276 	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1277 		return -EINVAL;
1278 
1279 	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1280 	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1281 
1282 	return ret;
1283 }
1284 
1285 /**
1286  * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1287  *
1288  * @dev_priv: Pointer to a device private struct.
1289  * @sw_context: The software context used for this command submission.
1290  * @header: Pointer to the command header in the command stream.
1291  *
1292  * The query bind operation will eventually associate the query ID with its
1293  * backing MOB.  In this function, we take the user mode MOB ID and use
1294  * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1295  */
1296 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1297 				 struct vmw_sw_context *sw_context,
1298 				 SVGA3dCmdHeader *header)
1299 {
1300 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1301 	struct vmw_buffer_object *vmw_bo;
1302 	int ret;
1303 
1304 	cmd = container_of(header, typeof(*cmd), header);
1305 
1306 	/*
1307 	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1308 	 * list so its kernel mode MOB ID can be filled in later
1309 	 */
1310 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1311 				    &vmw_bo);
1312 
1313 	if (ret != 0)
1314 		return ret;
1315 
1316 	sw_context->dx_query_mob = vmw_bo;
1317 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1318 	return 0;
1319 }
1320 
1321 /**
1322  * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1323  *
1324  * @dev_priv: Pointer to a device private struct.
1325  * @sw_context: The software context used for this command submission.
1326  * @header: Pointer to the command header in the command stream.
1327  */
1328 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1329 				  struct vmw_sw_context *sw_context,
1330 				  SVGA3dCmdHeader *header)
1331 {
1332 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1333 		container_of(header, typeof(*cmd), header);
1334 
1335 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1336 				 VMW_RES_DIRTY_SET, user_context_converter,
1337 				 &cmd->body.cid, NULL);
1338 }
1339 
1340 /**
1341  * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1342  *
1343  * @dev_priv: Pointer to a device private struct.
1344  * @sw_context: The software context used for this command submission.
1345  * @header: Pointer to the command header in the command stream.
1346  */
1347 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1348 			       struct vmw_sw_context *sw_context,
1349 			       SVGA3dCmdHeader *header)
1350 {
1351 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1352 		container_of(header, typeof(*cmd), header);
1353 
1354 	if (unlikely(dev_priv->has_mob)) {
1355 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1356 
1357 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1358 
1359 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1360 		gb_cmd.header.size = cmd->header.size;
1361 		gb_cmd.body.cid = cmd->body.cid;
1362 		gb_cmd.body.type = cmd->body.type;
1363 
1364 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1365 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1366 	}
1367 
1368 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1369 				 VMW_RES_DIRTY_SET, user_context_converter,
1370 				 &cmd->body.cid, NULL);
1371 }
1372 
1373 /**
1374  * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1375  *
1376  * @dev_priv: Pointer to a device private struct.
1377  * @sw_context: The software context used for this command submission.
1378  * @header: Pointer to the command header in the command stream.
1379  */
1380 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1381 				struct vmw_sw_context *sw_context,
1382 				SVGA3dCmdHeader *header)
1383 {
1384 	struct vmw_buffer_object *vmw_bo;
1385 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1386 	int ret;
1387 
1388 	cmd = container_of(header, typeof(*cmd), header);
1389 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1390 	if (unlikely(ret != 0))
1391 		return ret;
1392 
1393 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1394 				    &vmw_bo);
1395 	if (unlikely(ret != 0))
1396 		return ret;
1397 
1398 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1399 
1400 	return ret;
1401 }
1402 
1403 /**
1404  * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1405  *
1406  * @dev_priv: Pointer to a device private struct.
1407  * @sw_context: The software context used for this command submission.
1408  * @header: Pointer to the command header in the command stream.
1409  */
1410 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1411 			     struct vmw_sw_context *sw_context,
1412 			     SVGA3dCmdHeader *header)
1413 {
1414 	struct vmw_buffer_object *vmw_bo;
1415 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1416 	int ret;
1417 
1418 	cmd = container_of(header, typeof(*cmd), header);
1419 	if (dev_priv->has_mob) {
1420 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1421 
1422 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1423 
1424 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1425 		gb_cmd.header.size = cmd->header.size;
1426 		gb_cmd.body.cid = cmd->body.cid;
1427 		gb_cmd.body.type = cmd->body.type;
1428 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1429 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1430 
1431 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1432 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1433 	}
1434 
1435 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1436 	if (unlikely(ret != 0))
1437 		return ret;
1438 
1439 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1440 				      &cmd->body.guestResult, &vmw_bo);
1441 	if (unlikely(ret != 0))
1442 		return ret;
1443 
1444 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1445 
1446 	return ret;
1447 }
1448 
1449 /**
1450  * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1451  *
1452  * @dev_priv: Pointer to a device private struct.
1453  * @sw_context: The software context used for this command submission.
1454  * @header: Pointer to the command header in the command stream.
1455  */
1456 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1457 				 struct vmw_sw_context *sw_context,
1458 				 SVGA3dCmdHeader *header)
1459 {
1460 	struct vmw_buffer_object *vmw_bo;
1461 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1462 	int ret;
1463 
1464 	cmd = container_of(header, typeof(*cmd), header);
1465 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1466 	if (unlikely(ret != 0))
1467 		return ret;
1468 
1469 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1470 				    &vmw_bo);
1471 	if (unlikely(ret != 0))
1472 		return ret;
1473 
1474 	return 0;
1475 }
1476 
1477 /**
1478  * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1479  *
1480  * @dev_priv: Pointer to a device private struct.
1481  * @sw_context: The software context used for this command submission.
1482  * @header: Pointer to the command header in the command stream.
1483  */
1484 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1485 			      struct vmw_sw_context *sw_context,
1486 			      SVGA3dCmdHeader *header)
1487 {
1488 	struct vmw_buffer_object *vmw_bo;
1489 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1490 	int ret;
1491 
1492 	cmd = container_of(header, typeof(*cmd), header);
1493 	if (dev_priv->has_mob) {
1494 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1495 
1496 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1497 
1498 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1499 		gb_cmd.header.size = cmd->header.size;
1500 		gb_cmd.body.cid = cmd->body.cid;
1501 		gb_cmd.body.type = cmd->body.type;
1502 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1503 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1504 
1505 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1506 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1507 	}
1508 
1509 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1510 	if (unlikely(ret != 0))
1511 		return ret;
1512 
1513 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1514 				      &cmd->body.guestResult, &vmw_bo);
1515 	if (unlikely(ret != 0))
1516 		return ret;
1517 
1518 	return 0;
1519 }
1520 
1521 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1522 		       struct vmw_sw_context *sw_context,
1523 		       SVGA3dCmdHeader *header)
1524 {
1525 	struct vmw_buffer_object *vmw_bo = NULL;
1526 	struct vmw_surface *srf = NULL;
1527 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1528 	int ret;
1529 	SVGA3dCmdSurfaceDMASuffix *suffix;
1530 	uint32_t bo_size;
1531 	bool dirty;
1532 
1533 	cmd = container_of(header, typeof(*cmd), header);
1534 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1535 					       header->size - sizeof(*suffix));
1536 
1537 	/* Make sure device and verifier stays in sync. */
1538 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1539 		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1540 		return -EINVAL;
1541 	}
1542 
1543 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1544 				      &cmd->body.guest.ptr, &vmw_bo);
1545 	if (unlikely(ret != 0))
1546 		return ret;
1547 
1548 	/* Make sure DMA doesn't cross BO boundaries. */
1549 	bo_size = vmw_bo->base.base.size;
1550 	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1551 		VMW_DEBUG_USER("Invalid DMA offset.\n");
1552 		return -EINVAL;
1553 	}
1554 
1555 	bo_size -= cmd->body.guest.ptr.offset;
1556 	if (unlikely(suffix->maximumOffset > bo_size))
1557 		suffix->maximumOffset = bo_size;
1558 
1559 	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1560 		VMW_RES_DIRTY_SET : 0;
1561 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1562 				dirty, user_surface_converter,
1563 				&cmd->body.host.sid, NULL);
1564 	if (unlikely(ret != 0)) {
1565 		if (unlikely(ret != -ERESTARTSYS))
1566 			VMW_DEBUG_USER("could not find surface for DMA.\n");
1567 		return ret;
1568 	}
1569 
1570 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1571 
1572 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1573 
1574 	return 0;
1575 }
1576 
1577 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1578 			struct vmw_sw_context *sw_context,
1579 			SVGA3dCmdHeader *header)
1580 {
1581 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1582 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1583 		(unsigned long)header + sizeof(*cmd));
1584 	SVGA3dPrimitiveRange *range;
1585 	uint32_t i;
1586 	uint32_t maxnum;
1587 	int ret;
1588 
1589 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1590 	if (unlikely(ret != 0))
1591 		return ret;
1592 
1593 	cmd = container_of(header, typeof(*cmd), header);
1594 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1595 
1596 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1597 		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1598 		return -EINVAL;
1599 	}
1600 
1601 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1602 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1603 					VMW_RES_DIRTY_NONE,
1604 					user_surface_converter,
1605 					&decl->array.surfaceId, NULL);
1606 		if (unlikely(ret != 0))
1607 			return ret;
1608 	}
1609 
1610 	maxnum = (header->size - sizeof(cmd->body) -
1611 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1612 	if (unlikely(cmd->body.numRanges > maxnum)) {
1613 		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1614 		return -EINVAL;
1615 	}
1616 
1617 	range = (SVGA3dPrimitiveRange *) decl;
1618 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1619 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1620 					VMW_RES_DIRTY_NONE,
1621 					user_surface_converter,
1622 					&range->indexArray.surfaceId, NULL);
1623 		if (unlikely(ret != 0))
1624 			return ret;
1625 	}
1626 	return 0;
1627 }
1628 
1629 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1630 			     struct vmw_sw_context *sw_context,
1631 			     SVGA3dCmdHeader *header)
1632 {
1633 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1634 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1635 	  ((unsigned long) header + header->size + sizeof(header));
1636 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1637 		((unsigned long) header + sizeof(*cmd));
1638 	struct vmw_resource *ctx;
1639 	struct vmw_resource *res;
1640 	int ret;
1641 
1642 	cmd = container_of(header, typeof(*cmd), header);
1643 
1644 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1645 				VMW_RES_DIRTY_SET, user_context_converter,
1646 				&cmd->body.cid, &ctx);
1647 	if (unlikely(ret != 0))
1648 		return ret;
1649 
1650 	for (; cur_state < last_state; ++cur_state) {
1651 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1652 			continue;
1653 
1654 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1655 			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1656 				       (unsigned int) cur_state->stage);
1657 			return -EINVAL;
1658 		}
1659 
1660 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1661 					VMW_RES_DIRTY_NONE,
1662 					user_surface_converter,
1663 					&cur_state->value, &res);
1664 		if (unlikely(ret != 0))
1665 			return ret;
1666 
1667 		if (dev_priv->has_mob) {
1668 			struct vmw_ctx_bindinfo_tex binding;
1669 			struct vmw_ctx_validation_info *node;
1670 
1671 			node = vmw_execbuf_info_from_res(sw_context, ctx);
1672 			if (!node)
1673 				return -EINVAL;
1674 
1675 			binding.bi.ctx = ctx;
1676 			binding.bi.res = res;
1677 			binding.bi.bt = vmw_ctx_binding_tex;
1678 			binding.texture_stage = cur_state->stage;
1679 			vmw_binding_add(node->staged, &binding.bi, 0,
1680 					binding.texture_stage);
1681 		}
1682 	}
1683 
1684 	return 0;
1685 }
1686 
1687 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1688 				      struct vmw_sw_context *sw_context,
1689 				      void *buf)
1690 {
1691 	struct vmw_buffer_object *vmw_bo;
1692 
1693 	struct {
1694 		uint32_t header;
1695 		SVGAFifoCmdDefineGMRFB body;
1696 	} *cmd = buf;
1697 
1698 	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1699 				       &vmw_bo);
1700 }
1701 
1702 /**
1703  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1704  * switching
1705  *
1706  * @dev_priv: Pointer to a device private struct.
1707  * @sw_context: The software context being used for this batch.
1708  * @res: Pointer to the resource.
1709  * @buf_id: Pointer to the user-space backup buffer handle in the command
1710  * stream.
1711  * @backup_offset: Offset of backup into MOB.
1712  *
1713  * This function prepares for registering a switch of backup buffers in the
1714  * resource metadata just prior to unreserving. It's basically a wrapper around
1715  * vmw_cmd_res_switch_backup with a different interface.
1716  */
1717 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1718 				     struct vmw_sw_context *sw_context,
1719 				     struct vmw_resource *res, uint32_t *buf_id,
1720 				     unsigned long backup_offset)
1721 {
1722 	struct vmw_buffer_object *vbo;
1723 	void *info;
1724 	int ret;
1725 
1726 	info = vmw_execbuf_info_from_res(sw_context, res);
1727 	if (!info)
1728 		return -EINVAL;
1729 
1730 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1731 	if (ret)
1732 		return ret;
1733 
1734 	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1735 					 backup_offset);
1736 	return 0;
1737 }
1738 
1739 /**
1740  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1741  *
1742  * @dev_priv: Pointer to a device private struct.
1743  * @sw_context: The software context being used for this batch.
1744  * @res_type: The resource type.
1745  * @converter: Information about user-space binding for this resource type.
1746  * @res_id: Pointer to the user-space resource handle in the command stream.
1747  * @buf_id: Pointer to the user-space backup buffer handle in the command
1748  * stream.
1749  * @backup_offset: Offset of backup into MOB.
1750  *
1751  * This function prepares for registering a switch of backup buffers in the
1752  * resource metadata just prior to unreserving. It's basically a wrapper around
1753  * vmw_cmd_res_switch_backup with a different interface.
1754  */
1755 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1756 				 struct vmw_sw_context *sw_context,
1757 				 enum vmw_res_type res_type,
1758 				 const struct vmw_user_resource_conv
1759 				 *converter, uint32_t *res_id, uint32_t *buf_id,
1760 				 unsigned long backup_offset)
1761 {
1762 	struct vmw_resource *res;
1763 	int ret;
1764 
1765 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1766 				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1767 	if (ret)
1768 		return ret;
1769 
1770 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1771 					 backup_offset);
1772 }
1773 
1774 /**
1775  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1776  *
1777  * @dev_priv: Pointer to a device private struct.
1778  * @sw_context: The software context being used for this batch.
1779  * @header: Pointer to the command header in the command stream.
1780  */
1781 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1782 				   struct vmw_sw_context *sw_context,
1783 				   SVGA3dCmdHeader *header)
1784 {
1785 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1786 		container_of(header, typeof(*cmd), header);
1787 
1788 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1789 				     user_surface_converter, &cmd->body.sid,
1790 				     &cmd->body.mobid, 0);
1791 }
1792 
1793 /**
1794  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1795  *
1796  * @dev_priv: Pointer to a device private struct.
1797  * @sw_context: The software context being used for this batch.
1798  * @header: Pointer to the command header in the command stream.
1799  */
1800 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1801 				   struct vmw_sw_context *sw_context,
1802 				   SVGA3dCmdHeader *header)
1803 {
1804 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1805 		container_of(header, typeof(*cmd), header);
1806 
1807 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1808 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1809 				 &cmd->body.image.sid, NULL);
1810 }
1811 
1812 /**
1813  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1814  *
1815  * @dev_priv: Pointer to a device private struct.
1816  * @sw_context: The software context being used for this batch.
1817  * @header: Pointer to the command header in the command stream.
1818  */
1819 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1820 				     struct vmw_sw_context *sw_context,
1821 				     SVGA3dCmdHeader *header)
1822 {
1823 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1824 		container_of(header, typeof(*cmd), header);
1825 
1826 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1827 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1828 				 &cmd->body.sid, NULL);
1829 }
1830 
1831 /**
1832  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1833  *
1834  * @dev_priv: Pointer to a device private struct.
1835  * @sw_context: The software context being used for this batch.
1836  * @header: Pointer to the command header in the command stream.
1837  */
1838 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1839 				     struct vmw_sw_context *sw_context,
1840 				     SVGA3dCmdHeader *header)
1841 {
1842 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1843 		container_of(header, typeof(*cmd), header);
1844 
1845 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1846 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1847 				 &cmd->body.image.sid, NULL);
1848 }
1849 
1850 /**
1851  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1852  * command
1853  *
1854  * @dev_priv: Pointer to a device private struct.
1855  * @sw_context: The software context being used for this batch.
1856  * @header: Pointer to the command header in the command stream.
1857  */
1858 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1859 				       struct vmw_sw_context *sw_context,
1860 				       SVGA3dCmdHeader *header)
1861 {
1862 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1863 		container_of(header, typeof(*cmd), header);
1864 
1865 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1866 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1867 				 &cmd->body.sid, NULL);
1868 }
1869 
1870 /**
1871  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1872  * command
1873  *
1874  * @dev_priv: Pointer to a device private struct.
1875  * @sw_context: The software context being used for this batch.
1876  * @header: Pointer to the command header in the command stream.
1877  */
1878 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1879 				       struct vmw_sw_context *sw_context,
1880 				       SVGA3dCmdHeader *header)
1881 {
1882 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1883 		container_of(header, typeof(*cmd), header);
1884 
1885 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1886 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1887 				 &cmd->body.image.sid, NULL);
1888 }
1889 
1890 /**
1891  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1892  * command
1893  *
1894  * @dev_priv: Pointer to a device private struct.
1895  * @sw_context: The software context being used for this batch.
1896  * @header: Pointer to the command header in the command stream.
1897  */
1898 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1899 					 struct vmw_sw_context *sw_context,
1900 					 SVGA3dCmdHeader *header)
1901 {
1902 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1903 		container_of(header, typeof(*cmd), header);
1904 
1905 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1906 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1907 				 &cmd->body.sid, NULL);
1908 }
1909 
1910 /**
1911  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1912  *
1913  * @dev_priv: Pointer to a device private struct.
1914  * @sw_context: The software context being used for this batch.
1915  * @header: Pointer to the command header in the command stream.
1916  */
1917 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1918 				 struct vmw_sw_context *sw_context,
1919 				 SVGA3dCmdHeader *header)
1920 {
1921 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1922 	int ret;
1923 	size_t size;
1924 	struct vmw_resource *ctx;
1925 
1926 	cmd = container_of(header, typeof(*cmd), header);
1927 
1928 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1929 				VMW_RES_DIRTY_SET, user_context_converter,
1930 				&cmd->body.cid, &ctx);
1931 	if (unlikely(ret != 0))
1932 		return ret;
1933 
1934 	if (unlikely(!dev_priv->has_mob))
1935 		return 0;
1936 
1937 	size = cmd->header.size - sizeof(cmd->body);
1938 	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1939 				    cmd->body.shid, cmd + 1, cmd->body.type,
1940 				    size, &sw_context->staged_cmd_res);
1941 	if (unlikely(ret != 0))
1942 		return ret;
1943 
1944 	return vmw_resource_relocation_add(sw_context, NULL,
1945 					   vmw_ptr_diff(sw_context->buf_start,
1946 							&cmd->header.id),
1947 					   vmw_res_rel_nop);
1948 }
1949 
1950 /**
1951  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1952  *
1953  * @dev_priv: Pointer to a device private struct.
1954  * @sw_context: The software context being used for this batch.
1955  * @header: Pointer to the command header in the command stream.
1956  */
1957 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1958 				  struct vmw_sw_context *sw_context,
1959 				  SVGA3dCmdHeader *header)
1960 {
1961 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1962 	int ret;
1963 	struct vmw_resource *ctx;
1964 
1965 	cmd = container_of(header, typeof(*cmd), header);
1966 
1967 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1968 				VMW_RES_DIRTY_SET, user_context_converter,
1969 				&cmd->body.cid, &ctx);
1970 	if (unlikely(ret != 0))
1971 		return ret;
1972 
1973 	if (unlikely(!dev_priv->has_mob))
1974 		return 0;
1975 
1976 	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1977 				cmd->body.type, &sw_context->staged_cmd_res);
1978 	if (unlikely(ret != 0))
1979 		return ret;
1980 
1981 	return vmw_resource_relocation_add(sw_context, NULL,
1982 					   vmw_ptr_diff(sw_context->buf_start,
1983 							&cmd->header.id),
1984 					   vmw_res_rel_nop);
1985 }
1986 
1987 /**
1988  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1989  *
1990  * @dev_priv: Pointer to a device private struct.
1991  * @sw_context: The software context being used for this batch.
1992  * @header: Pointer to the command header in the command stream.
1993  */
1994 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1995 			      struct vmw_sw_context *sw_context,
1996 			      SVGA3dCmdHeader *header)
1997 {
1998 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1999 	struct vmw_ctx_bindinfo_shader binding;
2000 	struct vmw_resource *ctx, *res = NULL;
2001 	struct vmw_ctx_validation_info *ctx_info;
2002 	int ret;
2003 
2004 	cmd = container_of(header, typeof(*cmd), header);
2005 
2006 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2007 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2008 			       (unsigned int) cmd->body.type);
2009 		return -EINVAL;
2010 	}
2011 
2012 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2013 				VMW_RES_DIRTY_SET, user_context_converter,
2014 				&cmd->body.cid, &ctx);
2015 	if (unlikely(ret != 0))
2016 		return ret;
2017 
2018 	if (!dev_priv->has_mob)
2019 		return 0;
2020 
2021 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2022 		/*
2023 		 * This is the compat shader path - Per device guest-backed
2024 		 * shaders, but user-space thinks it's per context host-
2025 		 * backed shaders.
2026 		 */
2027 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2028 					cmd->body.shid, cmd->body.type);
2029 		if (!IS_ERR(res)) {
2030 			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2031 							    VMW_RES_DIRTY_NONE);
2032 			if (unlikely(ret != 0))
2033 				return ret;
2034 
2035 			ret = vmw_resource_relocation_add
2036 				(sw_context, res,
2037 				 vmw_ptr_diff(sw_context->buf_start,
2038 					      &cmd->body.shid),
2039 				 vmw_res_rel_normal);
2040 			if (unlikely(ret != 0))
2041 				return ret;
2042 		}
2043 	}
2044 
2045 	if (IS_ERR_OR_NULL(res)) {
2046 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2047 					VMW_RES_DIRTY_NONE,
2048 					user_shader_converter, &cmd->body.shid,
2049 					&res);
2050 		if (unlikely(ret != 0))
2051 			return ret;
2052 	}
2053 
2054 	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2055 	if (!ctx_info)
2056 		return -EINVAL;
2057 
2058 	binding.bi.ctx = ctx;
2059 	binding.bi.res = res;
2060 	binding.bi.bt = vmw_ctx_binding_shader;
2061 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2062 	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2063 
2064 	return 0;
2065 }
2066 
2067 /**
2068  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2069  *
2070  * @dev_priv: Pointer to a device private struct.
2071  * @sw_context: The software context being used for this batch.
2072  * @header: Pointer to the command header in the command stream.
2073  */
2074 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2075 				    struct vmw_sw_context *sw_context,
2076 				    SVGA3dCmdHeader *header)
2077 {
2078 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2079 	int ret;
2080 
2081 	cmd = container_of(header, typeof(*cmd), header);
2082 
2083 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2084 				VMW_RES_DIRTY_SET, user_context_converter,
2085 				&cmd->body.cid, NULL);
2086 	if (unlikely(ret != 0))
2087 		return ret;
2088 
2089 	if (dev_priv->has_mob)
2090 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2091 
2092 	return 0;
2093 }
2094 
2095 /**
2096  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2097  *
2098  * @dev_priv: Pointer to a device private struct.
2099  * @sw_context: The software context being used for this batch.
2100  * @header: Pointer to the command header in the command stream.
2101  */
2102 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2103 				  struct vmw_sw_context *sw_context,
2104 				  SVGA3dCmdHeader *header)
2105 {
2106 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2107 		container_of(header, typeof(*cmd), header);
2108 
2109 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2110 				     user_shader_converter, &cmd->body.shid,
2111 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2112 }
2113 
2114 /**
2115  * vmw_cmd_dx_set_single_constant_buffer - Validate
2116  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2117  *
2118  * @dev_priv: Pointer to a device private struct.
2119  * @sw_context: The software context being used for this batch.
2120  * @header: Pointer to the command header in the command stream.
2121  */
2122 static int
2123 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2124 				      struct vmw_sw_context *sw_context,
2125 				      SVGA3dCmdHeader *header)
2126 {
2127 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2128 	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
2129 		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
2130 
2131 	struct vmw_resource *res = NULL;
2132 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2133 	struct vmw_ctx_bindinfo_cb binding;
2134 	int ret;
2135 
2136 	if (!ctx_node)
2137 		return -EINVAL;
2138 
2139 	cmd = container_of(header, typeof(*cmd), header);
2140 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2141 				VMW_RES_DIRTY_NONE, user_surface_converter,
2142 				&cmd->body.sid, &res);
2143 	if (unlikely(ret != 0))
2144 		return ret;
2145 
2146 	binding.bi.ctx = ctx_node->ctx;
2147 	binding.bi.res = res;
2148 	binding.bi.bt = vmw_ctx_binding_cb;
2149 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2150 	binding.offset = cmd->body.offsetInBytes;
2151 	binding.size = cmd->body.sizeInBytes;
2152 	binding.slot = cmd->body.slot;
2153 
2154 	if (binding.shader_slot >= max_shader_num ||
2155 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2156 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2157 			       (unsigned int) cmd->body.type,
2158 			       (unsigned int) binding.slot);
2159 		return -EINVAL;
2160 	}
2161 
2162 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2163 			binding.slot);
2164 
2165 	return 0;
2166 }
2167 
2168 /**
2169  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2170  * command
2171  *
2172  * @dev_priv: Pointer to a device private struct.
2173  * @sw_context: The software context being used for this batch.
2174  * @header: Pointer to the command header in the command stream.
2175  */
2176 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2177 				     struct vmw_sw_context *sw_context,
2178 				     SVGA3dCmdHeader *header)
2179 {
2180 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2181 		container_of(header, typeof(*cmd), header);
2182 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2183 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2184 
2185 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2186 		sizeof(SVGA3dShaderResourceViewId);
2187 
2188 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2189 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2190 	    cmd->body.type >= max_allowed) {
2191 		VMW_DEBUG_USER("Invalid shader binding.\n");
2192 		return -EINVAL;
2193 	}
2194 
2195 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2196 				     vmw_ctx_binding_sr,
2197 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2198 				     (void *) &cmd[1], num_sr_view,
2199 				     cmd->body.startView);
2200 }
2201 
2202 /**
2203  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2204  *
2205  * @dev_priv: Pointer to a device private struct.
2206  * @sw_context: The software context being used for this batch.
2207  * @header: Pointer to the command header in the command stream.
2208  */
2209 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2210 				 struct vmw_sw_context *sw_context,
2211 				 SVGA3dCmdHeader *header)
2212 {
2213 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2214 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2215 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2216 	struct vmw_resource *res = NULL;
2217 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2218 	struct vmw_ctx_bindinfo_shader binding;
2219 	int ret = 0;
2220 
2221 	if (!ctx_node)
2222 		return -EINVAL;
2223 
2224 	cmd = container_of(header, typeof(*cmd), header);
2225 
2226 	if (cmd->body.type >= max_allowed ||
2227 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2228 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2229 			       (unsigned int) cmd->body.type);
2230 		return -EINVAL;
2231 	}
2232 
2233 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2234 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2235 		if (IS_ERR(res)) {
2236 			VMW_DEBUG_USER("Could not find shader for binding.\n");
2237 			return PTR_ERR(res);
2238 		}
2239 
2240 		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2241 						    VMW_RES_DIRTY_NONE);
2242 		if (ret)
2243 			return ret;
2244 	}
2245 
2246 	binding.bi.ctx = ctx_node->ctx;
2247 	binding.bi.res = res;
2248 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2249 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2250 
2251 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2252 
2253 	return 0;
2254 }
2255 
2256 /**
2257  * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2258  * command
2259  *
2260  * @dev_priv: Pointer to a device private struct.
2261  * @sw_context: The software context being used for this batch.
2262  * @header: Pointer to the command header in the command stream.
2263  */
2264 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2265 					 struct vmw_sw_context *sw_context,
2266 					 SVGA3dCmdHeader *header)
2267 {
2268 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2269 	struct vmw_ctx_bindinfo_vb binding;
2270 	struct vmw_resource *res;
2271 	struct {
2272 		SVGA3dCmdHeader header;
2273 		SVGA3dCmdDXSetVertexBuffers body;
2274 		SVGA3dVertexBuffer buf[];
2275 	} *cmd;
2276 	int i, ret, num;
2277 
2278 	if (!ctx_node)
2279 		return -EINVAL;
2280 
2281 	cmd = container_of(header, typeof(*cmd), header);
2282 	num = (cmd->header.size - sizeof(cmd->body)) /
2283 		sizeof(SVGA3dVertexBuffer);
2284 	if ((u64)num + (u64)cmd->body.startBuffer >
2285 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2286 		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2287 		return -EINVAL;
2288 	}
2289 
2290 	for (i = 0; i < num; i++) {
2291 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2292 					VMW_RES_DIRTY_NONE,
2293 					user_surface_converter,
2294 					&cmd->buf[i].sid, &res);
2295 		if (unlikely(ret != 0))
2296 			return ret;
2297 
2298 		binding.bi.ctx = ctx_node->ctx;
2299 		binding.bi.bt = vmw_ctx_binding_vb;
2300 		binding.bi.res = res;
2301 		binding.offset = cmd->buf[i].offset;
2302 		binding.stride = cmd->buf[i].stride;
2303 		binding.slot = i + cmd->body.startBuffer;
2304 
2305 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2306 	}
2307 
2308 	return 0;
2309 }
2310 
2311 /**
2312  * vmw_cmd_dx_set_index_buffer - Validate
2313  * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2314  *
2315  * @dev_priv: Pointer to a device private struct.
2316  * @sw_context: The software context being used for this batch.
2317  * @header: Pointer to the command header in the command stream.
2318  */
2319 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2320 				       struct vmw_sw_context *sw_context,
2321 				       SVGA3dCmdHeader *header)
2322 {
2323 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2324 	struct vmw_ctx_bindinfo_ib binding;
2325 	struct vmw_resource *res;
2326 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2327 	int ret;
2328 
2329 	if (!ctx_node)
2330 		return -EINVAL;
2331 
2332 	cmd = container_of(header, typeof(*cmd), header);
2333 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2334 				VMW_RES_DIRTY_NONE, user_surface_converter,
2335 				&cmd->body.sid, &res);
2336 	if (unlikely(ret != 0))
2337 		return ret;
2338 
2339 	binding.bi.ctx = ctx_node->ctx;
2340 	binding.bi.res = res;
2341 	binding.bi.bt = vmw_ctx_binding_ib;
2342 	binding.offset = cmd->body.offset;
2343 	binding.format = cmd->body.format;
2344 
2345 	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2346 
2347 	return 0;
2348 }
2349 
2350 /**
2351  * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2352  * command
2353  *
2354  * @dev_priv: Pointer to a device private struct.
2355  * @sw_context: The software context being used for this batch.
2356  * @header: Pointer to the command header in the command stream.
2357  */
2358 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2359 					struct vmw_sw_context *sw_context,
2360 					SVGA3dCmdHeader *header)
2361 {
2362 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2363 		container_of(header, typeof(*cmd), header);
2364 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2365 		sizeof(SVGA3dRenderTargetViewId);
2366 	int ret;
2367 
2368 	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2369 		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2370 		return -EINVAL;
2371 	}
2372 
2373 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2374 				    0, &cmd->body.depthStencilViewId, 1, 0);
2375 	if (ret)
2376 		return ret;
2377 
2378 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2379 				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2380 				     num_rt_view, 0);
2381 }
2382 
2383 /**
2384  * vmw_cmd_dx_clear_rendertarget_view - Validate
2385  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2386  *
2387  * @dev_priv: Pointer to a device private struct.
2388  * @sw_context: The software context being used for this batch.
2389  * @header: Pointer to the command header in the command stream.
2390  */
2391 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2392 					      struct vmw_sw_context *sw_context,
2393 					      SVGA3dCmdHeader *header)
2394 {
2395 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2396 		container_of(header, typeof(*cmd), header);
2397 	struct vmw_resource *ret;
2398 
2399 	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2400 				  cmd->body.renderTargetViewId);
2401 
2402 	return PTR_ERR_OR_ZERO(ret);
2403 }
2404 
2405 /**
2406  * vmw_cmd_dx_clear_depthstencil_view - Validate
2407  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2408  *
2409  * @dev_priv: Pointer to a device private struct.
2410  * @sw_context: The software context being used for this batch.
2411  * @header: Pointer to the command header in the command stream.
2412  */
2413 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2414 					      struct vmw_sw_context *sw_context,
2415 					      SVGA3dCmdHeader *header)
2416 {
2417 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2418 		container_of(header, typeof(*cmd), header);
2419 	struct vmw_resource *ret;
2420 
2421 	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2422 				  cmd->body.depthStencilViewId);
2423 
2424 	return PTR_ERR_OR_ZERO(ret);
2425 }
2426 
2427 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2428 				  struct vmw_sw_context *sw_context,
2429 				  SVGA3dCmdHeader *header)
2430 {
2431 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2432 	struct vmw_resource *srf;
2433 	struct vmw_resource *res;
2434 	enum vmw_view_type view_type;
2435 	int ret;
2436 	/*
2437 	 * This is based on the fact that all affected define commands have the
2438 	 * same initial command body layout.
2439 	 */
2440 	struct {
2441 		SVGA3dCmdHeader header;
2442 		uint32 defined_id;
2443 		uint32 sid;
2444 	} *cmd;
2445 
2446 	if (!ctx_node)
2447 		return -EINVAL;
2448 
2449 	view_type = vmw_view_cmd_to_type(header->id);
2450 	if (view_type == vmw_view_max)
2451 		return -EINVAL;
2452 
2453 	cmd = container_of(header, typeof(*cmd), header);
2454 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2455 		VMW_DEBUG_USER("Invalid surface id.\n");
2456 		return -EINVAL;
2457 	}
2458 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2459 				VMW_RES_DIRTY_NONE, user_surface_converter,
2460 				&cmd->sid, &srf);
2461 	if (unlikely(ret != 0))
2462 		return ret;
2463 
2464 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2465 	ret = vmw_cotable_notify(res, cmd->defined_id);
2466 	if (unlikely(ret != 0))
2467 		return ret;
2468 
2469 	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2470 			    cmd->defined_id, header,
2471 			    header->size + sizeof(*header),
2472 			    &sw_context->staged_cmd_res);
2473 }
2474 
2475 /**
2476  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2477  *
2478  * @dev_priv: Pointer to a device private struct.
2479  * @sw_context: The software context being used for this batch.
2480  * @header: Pointer to the command header in the command stream.
2481  */
2482 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2483 				     struct vmw_sw_context *sw_context,
2484 				     SVGA3dCmdHeader *header)
2485 {
2486 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2487 	struct vmw_ctx_bindinfo_so_target binding;
2488 	struct vmw_resource *res;
2489 	struct {
2490 		SVGA3dCmdHeader header;
2491 		SVGA3dCmdDXSetSOTargets body;
2492 		SVGA3dSoTarget targets[];
2493 	} *cmd;
2494 	int i, ret, num;
2495 
2496 	if (!ctx_node)
2497 		return -EINVAL;
2498 
2499 	cmd = container_of(header, typeof(*cmd), header);
2500 	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2501 
2502 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2503 		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2504 		return -EINVAL;
2505 	}
2506 
2507 	for (i = 0; i < num; i++) {
2508 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2509 					VMW_RES_DIRTY_SET,
2510 					user_surface_converter,
2511 					&cmd->targets[i].sid, &res);
2512 		if (unlikely(ret != 0))
2513 			return ret;
2514 
2515 		binding.bi.ctx = ctx_node->ctx;
2516 		binding.bi.res = res;
2517 		binding.bi.bt = vmw_ctx_binding_so_target;
2518 		binding.offset = cmd->targets[i].offset;
2519 		binding.size = cmd->targets[i].sizeInBytes;
2520 		binding.slot = i;
2521 
2522 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2523 	}
2524 
2525 	return 0;
2526 }
2527 
2528 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2529 				struct vmw_sw_context *sw_context,
2530 				SVGA3dCmdHeader *header)
2531 {
2532 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2533 	struct vmw_resource *res;
2534 	/*
2535 	 * This is based on the fact that all affected define commands have
2536 	 * the same initial command body layout.
2537 	 */
2538 	struct {
2539 		SVGA3dCmdHeader header;
2540 		uint32 defined_id;
2541 	} *cmd;
2542 	enum vmw_so_type so_type;
2543 	int ret;
2544 
2545 	if (!ctx_node)
2546 		return -EINVAL;
2547 
2548 	so_type = vmw_so_cmd_to_type(header->id);
2549 	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2550 	if (IS_ERR(res))
2551 		return PTR_ERR(res);
2552 	cmd = container_of(header, typeof(*cmd), header);
2553 	ret = vmw_cotable_notify(res, cmd->defined_id);
2554 
2555 	return ret;
2556 }
2557 
2558 /**
2559  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2560  * command
2561  *
2562  * @dev_priv: Pointer to a device private struct.
2563  * @sw_context: The software context being used for this batch.
2564  * @header: Pointer to the command header in the command stream.
2565  */
2566 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2567 					struct vmw_sw_context *sw_context,
2568 					SVGA3dCmdHeader *header)
2569 {
2570 	struct {
2571 		SVGA3dCmdHeader header;
2572 		union {
2573 			SVGA3dCmdDXReadbackSubResource r_body;
2574 			SVGA3dCmdDXInvalidateSubResource i_body;
2575 			SVGA3dCmdDXUpdateSubResource u_body;
2576 			SVGA3dSurfaceId sid;
2577 		};
2578 	} *cmd;
2579 
2580 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2581 		     offsetof(typeof(*cmd), sid));
2582 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2583 		     offsetof(typeof(*cmd), sid));
2584 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2585 		     offsetof(typeof(*cmd), sid));
2586 
2587 	cmd = container_of(header, typeof(*cmd), header);
2588 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2589 				 VMW_RES_DIRTY_NONE, user_surface_converter,
2590 				 &cmd->sid, NULL);
2591 }
2592 
2593 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2594 				struct vmw_sw_context *sw_context,
2595 				SVGA3dCmdHeader *header)
2596 {
2597 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2598 
2599 	if (!ctx_node)
2600 		return -EINVAL;
2601 
2602 	return 0;
2603 }
2604 
2605 /**
2606  * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2607  * resource for removal.
2608  *
2609  * @dev_priv: Pointer to a device private struct.
2610  * @sw_context: The software context being used for this batch.
2611  * @header: Pointer to the command header in the command stream.
2612  *
2613  * Check that the view exists, and if it was not created using this command
2614  * batch, conditionally make this command a NOP.
2615  */
2616 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2617 				  struct vmw_sw_context *sw_context,
2618 				  SVGA3dCmdHeader *header)
2619 {
2620 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2621 	struct {
2622 		SVGA3dCmdHeader header;
2623 		union vmw_view_destroy body;
2624 	} *cmd = container_of(header, typeof(*cmd), header);
2625 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2626 	struct vmw_resource *view;
2627 	int ret;
2628 
2629 	if (!ctx_node)
2630 		return -EINVAL;
2631 
2632 	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2633 			      &sw_context->staged_cmd_res, &view);
2634 	if (ret || !view)
2635 		return ret;
2636 
2637 	/*
2638 	 * If the view wasn't created during this command batch, it might
2639 	 * have been removed due to a context swapout, so add a
2640 	 * relocation to conditionally make this command a NOP to avoid
2641 	 * device errors.
2642 	 */
2643 	return vmw_resource_relocation_add(sw_context, view,
2644 					   vmw_ptr_diff(sw_context->buf_start,
2645 							&cmd->header.id),
2646 					   vmw_res_rel_cond_nop);
2647 }
2648 
2649 /**
2650  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2651  *
2652  * @dev_priv: Pointer to a device private struct.
2653  * @sw_context: The software context being used for this batch.
2654  * @header: Pointer to the command header in the command stream.
2655  */
2656 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2657 				    struct vmw_sw_context *sw_context,
2658 				    SVGA3dCmdHeader *header)
2659 {
2660 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2661 	struct vmw_resource *res;
2662 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2663 		container_of(header, typeof(*cmd), header);
2664 	int ret;
2665 
2666 	if (!ctx_node)
2667 		return -EINVAL;
2668 
2669 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2670 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2671 	if (ret)
2672 		return ret;
2673 
2674 	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2675 				 cmd->body.shaderId, cmd->body.type,
2676 				 &sw_context->staged_cmd_res);
2677 }
2678 
2679 /**
2680  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2681  *
2682  * @dev_priv: Pointer to a device private struct.
2683  * @sw_context: The software context being used for this batch.
2684  * @header: Pointer to the command header in the command stream.
2685  */
2686 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2687 				     struct vmw_sw_context *sw_context,
2688 				     SVGA3dCmdHeader *header)
2689 {
2690 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2691 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2692 		container_of(header, typeof(*cmd), header);
2693 	int ret;
2694 
2695 	if (!ctx_node)
2696 		return -EINVAL;
2697 
2698 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2699 				&sw_context->staged_cmd_res);
2700 
2701 	return ret;
2702 }
2703 
2704 /**
2705  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2706  *
2707  * @dev_priv: Pointer to a device private struct.
2708  * @sw_context: The software context being used for this batch.
2709  * @header: Pointer to the command header in the command stream.
2710  */
2711 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2712 				  struct vmw_sw_context *sw_context,
2713 				  SVGA3dCmdHeader *header)
2714 {
2715 	struct vmw_resource *ctx;
2716 	struct vmw_resource *res;
2717 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2718 		container_of(header, typeof(*cmd), header);
2719 	int ret;
2720 
2721 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2722 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2723 					VMW_RES_DIRTY_SET,
2724 					user_context_converter, &cmd->body.cid,
2725 					&ctx);
2726 		if (ret)
2727 			return ret;
2728 	} else {
2729 		struct vmw_ctx_validation_info *ctx_node =
2730 			VMW_GET_CTX_NODE(sw_context);
2731 
2732 		if (!ctx_node)
2733 			return -EINVAL;
2734 
2735 		ctx = ctx_node->ctx;
2736 	}
2737 
2738 	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2739 	if (IS_ERR(res)) {
2740 		VMW_DEBUG_USER("Could not find shader to bind.\n");
2741 		return PTR_ERR(res);
2742 	}
2743 
2744 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2745 					    VMW_RES_DIRTY_NONE);
2746 	if (ret) {
2747 		VMW_DEBUG_USER("Error creating resource validation node.\n");
2748 		return ret;
2749 	}
2750 
2751 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2752 					 &cmd->body.mobid,
2753 					 cmd->body.offsetInBytes);
2754 }
2755 
2756 /**
2757  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2758  *
2759  * @dev_priv: Pointer to a device private struct.
2760  * @sw_context: The software context being used for this batch.
2761  * @header: Pointer to the command header in the command stream.
2762  */
2763 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2764 			      struct vmw_sw_context *sw_context,
2765 			      SVGA3dCmdHeader *header)
2766 {
2767 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2768 		container_of(header, typeof(*cmd), header);
2769 	struct vmw_resource *view;
2770 	struct vmw_res_cache_entry *rcache;
2771 
2772 	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2773 				   cmd->body.shaderResourceViewId);
2774 	if (IS_ERR(view))
2775 		return PTR_ERR(view);
2776 
2777 	/*
2778 	 * Normally the shader-resource view is not gpu-dirtying, but for
2779 	 * this particular command it is...
2780 	 * So mark the last looked-up surface, which is the surface
2781 	 * the view points to, gpu-dirty.
2782 	 */
2783 	rcache = &sw_context->res_cache[vmw_res_surface];
2784 	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2785 				     VMW_RES_DIRTY_SET);
2786 	return 0;
2787 }
2788 
2789 /**
2790  * vmw_cmd_dx_transfer_from_buffer - Validate
2791  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2792  *
2793  * @dev_priv: Pointer to a device private struct.
2794  * @sw_context: The software context being used for this batch.
2795  * @header: Pointer to the command header in the command stream.
2796  */
2797 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2798 					   struct vmw_sw_context *sw_context,
2799 					   SVGA3dCmdHeader *header)
2800 {
2801 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2802 		container_of(header, typeof(*cmd), header);
2803 	int ret;
2804 
2805 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2806 				VMW_RES_DIRTY_NONE, user_surface_converter,
2807 				&cmd->body.srcSid, NULL);
2808 	if (ret != 0)
2809 		return ret;
2810 
2811 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2812 				 VMW_RES_DIRTY_SET, user_surface_converter,
2813 				 &cmd->body.destSid, NULL);
2814 }
2815 
2816 /**
2817  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2818  *
2819  * @dev_priv: Pointer to a device private struct.
2820  * @sw_context: The software context being used for this batch.
2821  * @header: Pointer to the command header in the command stream.
2822  */
2823 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2824 					   struct vmw_sw_context *sw_context,
2825 					   SVGA3dCmdHeader *header)
2826 {
2827 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2828 		container_of(header, typeof(*cmd), header);
2829 
2830 	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2831 		return -EINVAL;
2832 
2833 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2834 				 VMW_RES_DIRTY_SET, user_surface_converter,
2835 				 &cmd->body.surface.sid, NULL);
2836 }
2837 
2838 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2839 		       struct vmw_sw_context *sw_context,
2840 		       SVGA3dCmdHeader *header)
2841 {
2842 	if (!has_sm5_context(dev_priv))
2843 		return -EINVAL;
2844 
2845 	return 0;
2846 }
2847 
2848 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2849 				   struct vmw_sw_context *sw_context,
2850 				   SVGA3dCmdHeader *header)
2851 {
2852 	if (!has_sm5_context(dev_priv))
2853 		return -EINVAL;
2854 
2855 	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2856 }
2857 
2858 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2859 				   struct vmw_sw_context *sw_context,
2860 				   SVGA3dCmdHeader *header)
2861 {
2862 	if (!has_sm5_context(dev_priv))
2863 		return -EINVAL;
2864 
2865 	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2866 }
2867 
2868 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2869 				  struct vmw_sw_context *sw_context,
2870 				  SVGA3dCmdHeader *header)
2871 {
2872 	struct {
2873 		SVGA3dCmdHeader header;
2874 		SVGA3dCmdDXClearUAViewUint body;
2875 	} *cmd = container_of(header, typeof(*cmd), header);
2876 	struct vmw_resource *ret;
2877 
2878 	if (!has_sm5_context(dev_priv))
2879 		return -EINVAL;
2880 
2881 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2882 				  cmd->body.uaViewId);
2883 
2884 	return PTR_ERR_OR_ZERO(ret);
2885 }
2886 
2887 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2888 				   struct vmw_sw_context *sw_context,
2889 				   SVGA3dCmdHeader *header)
2890 {
2891 	struct {
2892 		SVGA3dCmdHeader header;
2893 		SVGA3dCmdDXClearUAViewFloat body;
2894 	} *cmd = container_of(header, typeof(*cmd), header);
2895 	struct vmw_resource *ret;
2896 
2897 	if (!has_sm5_context(dev_priv))
2898 		return -EINVAL;
2899 
2900 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2901 				  cmd->body.uaViewId);
2902 
2903 	return PTR_ERR_OR_ZERO(ret);
2904 }
2905 
2906 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2907 			   struct vmw_sw_context *sw_context,
2908 			   SVGA3dCmdHeader *header)
2909 {
2910 	struct {
2911 		SVGA3dCmdHeader header;
2912 		SVGA3dCmdDXSetUAViews body;
2913 	} *cmd = container_of(header, typeof(*cmd), header);
2914 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2915 		sizeof(SVGA3dUAViewId);
2916 	int ret;
2917 
2918 	if (!has_sm5_context(dev_priv))
2919 		return -EINVAL;
2920 
2921 	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2922 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2923 		return -EINVAL;
2924 	}
2925 
2926 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2927 				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2928 				    num_uav, 0);
2929 	if (ret)
2930 		return ret;
2931 
2932 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2933 					 cmd->body.uavSpliceIndex);
2934 
2935 	return ret;
2936 }
2937 
2938 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2939 			      struct vmw_sw_context *sw_context,
2940 			      SVGA3dCmdHeader *header)
2941 {
2942 	struct {
2943 		SVGA3dCmdHeader header;
2944 		SVGA3dCmdDXSetCSUAViews body;
2945 	} *cmd = container_of(header, typeof(*cmd), header);
2946 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2947 		sizeof(SVGA3dUAViewId);
2948 	int ret;
2949 
2950 	if (!has_sm5_context(dev_priv))
2951 		return -EINVAL;
2952 
2953 	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2954 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2955 		return -EINVAL;
2956 	}
2957 
2958 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2959 				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2960 				    num_uav, 0);
2961 	if (ret)
2962 		return ret;
2963 
2964 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2965 				  cmd->body.startIndex);
2966 
2967 	return ret;
2968 }
2969 
2970 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2971 					  struct vmw_sw_context *sw_context,
2972 					  SVGA3dCmdHeader *header)
2973 {
2974 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2975 	struct vmw_resource *res;
2976 	struct {
2977 		SVGA3dCmdHeader header;
2978 		SVGA3dCmdDXDefineStreamOutputWithMob body;
2979 	} *cmd = container_of(header, typeof(*cmd), header);
2980 	int ret;
2981 
2982 	if (!has_sm5_context(dev_priv))
2983 		return -EINVAL;
2984 
2985 	if (!ctx_node) {
2986 		DRM_ERROR("DX Context not set.\n");
2987 		return -EINVAL;
2988 	}
2989 
2990 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
2991 	ret = vmw_cotable_notify(res, cmd->body.soid);
2992 	if (ret)
2993 		return ret;
2994 
2995 	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
2996 				       cmd->body.soid,
2997 				       &sw_context->staged_cmd_res);
2998 }
2999 
3000 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3001 					   struct vmw_sw_context *sw_context,
3002 					   SVGA3dCmdHeader *header)
3003 {
3004 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3005 	struct vmw_resource *res;
3006 	struct {
3007 		SVGA3dCmdHeader header;
3008 		SVGA3dCmdDXDestroyStreamOutput body;
3009 	} *cmd = container_of(header, typeof(*cmd), header);
3010 
3011 	if (!ctx_node) {
3012 		DRM_ERROR("DX Context not set.\n");
3013 		return -EINVAL;
3014 	}
3015 
3016 	/*
3017 	 * When device does not support SM5 then streamoutput with mob command is
3018 	 * not available to user-space. Simply return in this case.
3019 	 */
3020 	if (!has_sm5_context(dev_priv))
3021 		return 0;
3022 
3023 	/*
3024 	 * With SM5 capable device if lookup fails then user-space probably used
3025 	 * old streamoutput define command. Return without an error.
3026 	 */
3027 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3028 					 cmd->body.soid);
3029 	if (IS_ERR(res))
3030 		return 0;
3031 
3032 	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3033 					  &sw_context->staged_cmd_res);
3034 }
3035 
3036 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3037 					struct vmw_sw_context *sw_context,
3038 					SVGA3dCmdHeader *header)
3039 {
3040 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3041 	struct vmw_resource *res;
3042 	struct {
3043 		SVGA3dCmdHeader header;
3044 		SVGA3dCmdDXBindStreamOutput body;
3045 	} *cmd = container_of(header, typeof(*cmd), header);
3046 	int ret;
3047 
3048 	if (!has_sm5_context(dev_priv))
3049 		return -EINVAL;
3050 
3051 	if (!ctx_node) {
3052 		DRM_ERROR("DX Context not set.\n");
3053 		return -EINVAL;
3054 	}
3055 
3056 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3057 					 cmd->body.soid);
3058 	if (IS_ERR(res)) {
3059 		DRM_ERROR("Could not find streamoutput to bind.\n");
3060 		return PTR_ERR(res);
3061 	}
3062 
3063 	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3064 
3065 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3066 					    VMW_RES_DIRTY_NONE);
3067 	if (ret) {
3068 		DRM_ERROR("Error creating resource validation node.\n");
3069 		return ret;
3070 	}
3071 
3072 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3073 					 &cmd->body.mobid,
3074 					 cmd->body.offsetInBytes);
3075 }
3076 
3077 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3078 				       struct vmw_sw_context *sw_context,
3079 				       SVGA3dCmdHeader *header)
3080 {
3081 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3082 	struct vmw_resource *res;
3083 	struct vmw_ctx_bindinfo_so binding;
3084 	struct {
3085 		SVGA3dCmdHeader header;
3086 		SVGA3dCmdDXSetStreamOutput body;
3087 	} *cmd = container_of(header, typeof(*cmd), header);
3088 	int ret;
3089 
3090 	if (!ctx_node) {
3091 		DRM_ERROR("DX Context not set.\n");
3092 		return -EINVAL;
3093 	}
3094 
3095 	if (cmd->body.soid == SVGA3D_INVALID_ID)
3096 		return 0;
3097 
3098 	/*
3099 	 * When device does not support SM5 then streamoutput with mob command is
3100 	 * not available to user-space. Simply return in this case.
3101 	 */
3102 	if (!has_sm5_context(dev_priv))
3103 		return 0;
3104 
3105 	/*
3106 	 * With SM5 capable device if lookup fails then user-space probably used
3107 	 * old streamoutput define command. Return without an error.
3108 	 */
3109 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3110 					 cmd->body.soid);
3111 	if (IS_ERR(res)) {
3112 		return 0;
3113 	}
3114 
3115 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3116 					    VMW_RES_DIRTY_NONE);
3117 	if (ret) {
3118 		DRM_ERROR("Error creating resource validation node.\n");
3119 		return ret;
3120 	}
3121 
3122 	binding.bi.ctx = ctx_node->ctx;
3123 	binding.bi.res = res;
3124 	binding.bi.bt = vmw_ctx_binding_so;
3125 	binding.slot = 0; /* Only one SO set to context at a time. */
3126 
3127 	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3128 			binding.slot);
3129 
3130 	return ret;
3131 }
3132 
3133 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3134 					      struct vmw_sw_context *sw_context,
3135 					      SVGA3dCmdHeader *header)
3136 {
3137 	struct vmw_draw_indexed_instanced_indirect_cmd {
3138 		SVGA3dCmdHeader header;
3139 		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3140 	} *cmd = container_of(header, typeof(*cmd), header);
3141 
3142 	if (!has_sm5_context(dev_priv))
3143 		return -EINVAL;
3144 
3145 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3146 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3147 				 &cmd->body.argsBufferSid, NULL);
3148 }
3149 
3150 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3151 				      struct vmw_sw_context *sw_context,
3152 				      SVGA3dCmdHeader *header)
3153 {
3154 	struct vmw_draw_instanced_indirect_cmd {
3155 		SVGA3dCmdHeader header;
3156 		SVGA3dCmdDXDrawInstancedIndirect body;
3157 	} *cmd = container_of(header, typeof(*cmd), header);
3158 
3159 	if (!has_sm5_context(dev_priv))
3160 		return -EINVAL;
3161 
3162 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3163 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3164 				 &cmd->body.argsBufferSid, NULL);
3165 }
3166 
3167 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3168 				     struct vmw_sw_context *sw_context,
3169 				     SVGA3dCmdHeader *header)
3170 {
3171 	struct vmw_dispatch_indirect_cmd {
3172 		SVGA3dCmdHeader header;
3173 		SVGA3dCmdDXDispatchIndirect body;
3174 	} *cmd = container_of(header, typeof(*cmd), header);
3175 
3176 	if (!has_sm5_context(dev_priv))
3177 		return -EINVAL;
3178 
3179 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3180 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3181 				 &cmd->body.argsBufferSid, NULL);
3182 }
3183 
3184 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3185 				struct vmw_sw_context *sw_context,
3186 				void *buf, uint32_t *size)
3187 {
3188 	uint32_t size_remaining = *size;
3189 	uint32_t cmd_id;
3190 
3191 	cmd_id = ((uint32_t *)buf)[0];
3192 	switch (cmd_id) {
3193 	case SVGA_CMD_UPDATE:
3194 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3195 		break;
3196 	case SVGA_CMD_DEFINE_GMRFB:
3197 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3198 		break;
3199 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3200 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3201 		break;
3202 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3203 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3204 		break;
3205 	default:
3206 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3207 		return -EINVAL;
3208 	}
3209 
3210 	if (*size > size_remaining) {
3211 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3212 			       cmd_id);
3213 		return -EINVAL;
3214 	}
3215 
3216 	if (unlikely(!sw_context->kernel)) {
3217 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3218 		return -EPERM;
3219 	}
3220 
3221 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3222 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3223 
3224 	return 0;
3225 }
3226 
3227 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3228 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3229 		    false, false, false),
3230 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3231 		    false, false, false),
3232 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3233 		    true, false, false),
3234 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3235 		    true, false, false),
3236 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3237 		    true, false, false),
3238 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3239 		    false, false, false),
3240 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3241 		    false, false, false),
3242 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3243 		    true, false, false),
3244 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3245 		    true, false, false),
3246 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3247 		    true, false, false),
3248 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3249 		    &vmw_cmd_set_render_target_check, true, false, false),
3250 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3251 		    true, false, false),
3252 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3253 		    true, false, false),
3254 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3255 		    true, false, false),
3256 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3257 		    true, false, false),
3258 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3259 		    true, false, false),
3260 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3261 		    true, false, false),
3262 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3263 		    true, false, false),
3264 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3265 		    false, false, false),
3266 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3267 		    true, false, false),
3268 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3269 		    true, false, false),
3270 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3271 		    true, false, false),
3272 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3273 		    true, false, false),
3274 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3275 		    true, false, false),
3276 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3277 		    true, false, false),
3278 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3279 		    true, false, false),
3280 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3281 		    true, false, false),
3282 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3283 		    true, false, false),
3284 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3285 		    true, false, false),
3286 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3287 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3288 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3289 		    false, false, false),
3290 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3291 		    false, false, false),
3292 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3293 		    false, false, false),
3294 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3295 		    false, false, false),
3296 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3297 		    false, false, false),
3298 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3299 		    false, false, false),
3300 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3301 		    false, false, false),
3302 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3303 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3304 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3305 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3306 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3307 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3308 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3309 		    false, false, true),
3310 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3311 		    false, false, true),
3312 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3313 		    false, false, true),
3314 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3315 		    false, false, true),
3316 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3317 		    false, false, true),
3318 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3319 		    false, false, true),
3320 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3321 		    false, false, true),
3322 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3323 		    false, false, true),
3324 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3325 		    true, false, true),
3326 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3327 		    false, false, true),
3328 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3329 		    true, false, true),
3330 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3331 		    &vmw_cmd_update_gb_surface, true, false, true),
3332 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3333 		    &vmw_cmd_readback_gb_image, true, false, true),
3334 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3335 		    &vmw_cmd_readback_gb_surface, true, false, true),
3336 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3337 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3338 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3339 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3340 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3341 		    false, false, true),
3342 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3343 		    false, false, true),
3344 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3345 		    false, false, true),
3346 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3347 		    false, false, true),
3348 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3349 		    false, false, true),
3350 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3351 		    false, false, true),
3352 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3353 		    true, false, true),
3354 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3355 		    false, false, true),
3356 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3357 		    false, false, false),
3358 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3359 		    true, false, true),
3360 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3361 		    true, false, true),
3362 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3363 		    true, false, true),
3364 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3365 		    true, false, true),
3366 	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3367 		    true, false, true),
3368 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3369 		    false, false, true),
3370 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3371 		    false, false, true),
3372 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3373 		    false, false, true),
3374 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3375 		    false, false, true),
3376 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3377 		    false, false, true),
3378 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3379 		    false, false, true),
3380 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3381 		    false, false, true),
3382 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3383 		    false, false, true),
3384 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3385 		    false, false, true),
3386 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3387 		    false, false, true),
3388 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3389 		    true, false, true),
3390 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3391 		    false, false, true),
3392 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3393 		    false, false, true),
3394 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3395 		    false, false, true),
3396 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3397 		    false, false, true),
3398 
3399 	/* SM commands */
3400 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3401 		    false, false, true),
3402 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3403 		    false, false, true),
3404 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3405 		    false, false, true),
3406 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3407 		    false, false, true),
3408 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3409 		    false, false, true),
3410 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3411 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3412 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3413 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3414 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3415 		    true, false, true),
3416 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3417 		    true, false, true),
3418 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3419 		    true, false, true),
3420 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3421 		    true, false, true),
3422 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3423 		    true, false, true),
3424 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3425 		    &vmw_cmd_dx_cid_check, true, false, true),
3426 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3427 		    true, false, true),
3428 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3429 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3430 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3431 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3432 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3433 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3434 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3435 		    true, false, true),
3436 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3437 		    &vmw_cmd_dx_cid_check, true, false, true),
3438 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3439 		    &vmw_cmd_dx_cid_check, true, false, true),
3440 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3441 		    true, false, true),
3442 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3443 		    true, false, true),
3444 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3445 		    true, false, true),
3446 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3447 		    &vmw_cmd_dx_cid_check, true, false, true),
3448 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3449 		    true, false, true),
3450 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3451 		    true, false, true),
3452 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3453 		    true, false, true),
3454 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3455 		    true, false, true),
3456 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3457 		    true, false, true),
3458 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3459 		    true, false, true),
3460 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3461 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3462 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3463 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3464 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3465 		    true, false, true),
3466 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3467 		    true, false, true),
3468 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3469 		    &vmw_cmd_dx_check_subresource, true, false, true),
3470 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3471 		    &vmw_cmd_dx_check_subresource, true, false, true),
3472 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3473 		    &vmw_cmd_dx_check_subresource, true, false, true),
3474 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3475 		    &vmw_cmd_dx_view_define, true, false, true),
3476 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3477 		    &vmw_cmd_dx_view_remove, true, false, true),
3478 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3479 		    &vmw_cmd_dx_view_define, true, false, true),
3480 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3481 		    &vmw_cmd_dx_view_remove, true, false, true),
3482 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3483 		    &vmw_cmd_dx_view_define, true, false, true),
3484 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3485 		    &vmw_cmd_dx_view_remove, true, false, true),
3486 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3487 		    &vmw_cmd_dx_so_define, true, false, true),
3488 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3489 		    &vmw_cmd_dx_cid_check, true, false, true),
3490 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3491 		    &vmw_cmd_dx_so_define, true, false, true),
3492 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3493 		    &vmw_cmd_dx_cid_check, true, false, true),
3494 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3495 		    &vmw_cmd_dx_so_define, true, false, true),
3496 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3497 		    &vmw_cmd_dx_cid_check, true, false, true),
3498 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3499 		    &vmw_cmd_dx_so_define, true, false, true),
3500 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3501 		    &vmw_cmd_dx_cid_check, true, false, true),
3502 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3503 		    &vmw_cmd_dx_so_define, true, false, true),
3504 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3505 		    &vmw_cmd_dx_cid_check, true, false, true),
3506 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3507 		    &vmw_cmd_dx_define_shader, true, false, true),
3508 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3509 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3510 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3511 		    &vmw_cmd_dx_bind_shader, true, false, true),
3512 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3513 		    &vmw_cmd_dx_so_define, true, false, true),
3514 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3515 		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3516 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3517 		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3518 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3519 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3520 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3521 		    &vmw_cmd_dx_cid_check, true, false, true),
3522 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3523 		    &vmw_cmd_dx_cid_check, true, false, true),
3524 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3525 		    &vmw_cmd_buffer_copy_check, true, false, true),
3526 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3527 		    &vmw_cmd_pred_copy_check, true, false, true),
3528 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3529 		    &vmw_cmd_dx_transfer_from_buffer,
3530 		    true, false, true),
3531 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3532 		    true, false, true),
3533 
3534 	/*
3535 	 * SM5 commands
3536 	 */
3537 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3538 		    true, false, true),
3539 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3540 		    true, false, true),
3541 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3542 		    true, false, true),
3543 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3544 		    &vmw_cmd_clear_uav_float, true, false, true),
3545 	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3546 		    false, true),
3547 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3548 		    true),
3549 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3550 		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3551 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3552 		    &vmw_cmd_instanced_indirect, true, false, true),
3553 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3554 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3555 		    &vmw_cmd_dispatch_indirect, true, false, true),
3556 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3557 		    false, true),
3558 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3559 		    &vmw_cmd_sm5_view_define, true, false, true),
3560 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3561 		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3562 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3563 		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3564 };
3565 
3566 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3567 {
3568 	u32 cmd_id = ((u32 *) buf)[0];
3569 
3570 	if (cmd_id >= SVGA_CMD_MAX) {
3571 		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3572 		const struct vmw_cmd_entry *entry;
3573 
3574 		*size = header->size + sizeof(SVGA3dCmdHeader);
3575 		cmd_id = header->id;
3576 		if (cmd_id >= SVGA_3D_CMD_MAX)
3577 			return false;
3578 
3579 		cmd_id -= SVGA_3D_CMD_BASE;
3580 		entry = &vmw_cmd_entries[cmd_id];
3581 		*cmd = entry->cmd_name;
3582 		return true;
3583 	}
3584 
3585 	switch (cmd_id) {
3586 	case SVGA_CMD_UPDATE:
3587 		*cmd = "SVGA_CMD_UPDATE";
3588 		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3589 		break;
3590 	case SVGA_CMD_DEFINE_GMRFB:
3591 		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3592 		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3593 		break;
3594 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3595 		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3596 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3597 		break;
3598 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3599 		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3600 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3601 		break;
3602 	default:
3603 		*cmd = "UNKNOWN";
3604 		*size = 0;
3605 		return false;
3606 	}
3607 
3608 	return true;
3609 }
3610 
3611 static int vmw_cmd_check(struct vmw_private *dev_priv,
3612 			 struct vmw_sw_context *sw_context, void *buf,
3613 			 uint32_t *size)
3614 {
3615 	uint32_t cmd_id;
3616 	uint32_t size_remaining = *size;
3617 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3618 	int ret;
3619 	const struct vmw_cmd_entry *entry;
3620 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3621 
3622 	cmd_id = ((uint32_t *)buf)[0];
3623 	/* Handle any none 3D commands */
3624 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3625 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3626 
3627 
3628 	cmd_id = header->id;
3629 	*size = header->size + sizeof(SVGA3dCmdHeader);
3630 
3631 	cmd_id -= SVGA_3D_CMD_BASE;
3632 	if (unlikely(*size > size_remaining))
3633 		goto out_invalid;
3634 
3635 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3636 		goto out_invalid;
3637 
3638 	entry = &vmw_cmd_entries[cmd_id];
3639 	if (unlikely(!entry->func))
3640 		goto out_invalid;
3641 
3642 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3643 		goto out_privileged;
3644 
3645 	if (unlikely(entry->gb_disable && gb))
3646 		goto out_old;
3647 
3648 	if (unlikely(entry->gb_enable && !gb))
3649 		goto out_new;
3650 
3651 	ret = entry->func(dev_priv, sw_context, header);
3652 	if (unlikely(ret != 0)) {
3653 		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3654 			       cmd_id + SVGA_3D_CMD_BASE, ret);
3655 		return ret;
3656 	}
3657 
3658 	return 0;
3659 out_invalid:
3660 	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3661 		       cmd_id + SVGA_3D_CMD_BASE);
3662 	return -EINVAL;
3663 out_privileged:
3664 	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3665 		       cmd_id + SVGA_3D_CMD_BASE);
3666 	return -EPERM;
3667 out_old:
3668 	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3669 		       cmd_id + SVGA_3D_CMD_BASE);
3670 	return -EINVAL;
3671 out_new:
3672 	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3673 		       cmd_id + SVGA_3D_CMD_BASE);
3674 	return -EINVAL;
3675 }
3676 
3677 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3678 			     struct vmw_sw_context *sw_context, void *buf,
3679 			     uint32_t size)
3680 {
3681 	int32_t cur_size = size;
3682 	int ret;
3683 
3684 	sw_context->buf_start = buf;
3685 
3686 	while (cur_size > 0) {
3687 		size = cur_size;
3688 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3689 		if (unlikely(ret != 0))
3690 			return ret;
3691 		buf = (void *)((unsigned long) buf + size);
3692 		cur_size -= size;
3693 	}
3694 
3695 	if (unlikely(cur_size != 0)) {
3696 		VMW_DEBUG_USER("Command verifier out of sync.\n");
3697 		return -EINVAL;
3698 	}
3699 
3700 	return 0;
3701 }
3702 
3703 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3704 {
3705 	/* Memory is validation context memory, so no need to free it */
3706 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3707 }
3708 
3709 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3710 {
3711 	struct vmw_relocation *reloc;
3712 	struct ttm_buffer_object *bo;
3713 
3714 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3715 		bo = &reloc->vbo->base;
3716 		switch (bo->resource->mem_type) {
3717 		case TTM_PL_VRAM:
3718 			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3719 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3720 			break;
3721 		case VMW_PL_GMR:
3722 			reloc->location->gmrId = bo->resource->start;
3723 			break;
3724 		case VMW_PL_MOB:
3725 			*reloc->mob_loc = bo->resource->start;
3726 			break;
3727 		default:
3728 			BUG();
3729 		}
3730 	}
3731 	vmw_free_relocations(sw_context);
3732 }
3733 
3734 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3735 				 uint32_t size)
3736 {
3737 	if (likely(sw_context->cmd_bounce_size >= size))
3738 		return 0;
3739 
3740 	if (sw_context->cmd_bounce_size == 0)
3741 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3742 
3743 	while (sw_context->cmd_bounce_size < size) {
3744 		sw_context->cmd_bounce_size =
3745 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3746 				   (sw_context->cmd_bounce_size >> 1));
3747 	}
3748 
3749 	vfree(sw_context->cmd_bounce);
3750 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3751 
3752 	if (sw_context->cmd_bounce == NULL) {
3753 		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3754 		sw_context->cmd_bounce_size = 0;
3755 		return -ENOMEM;
3756 	}
3757 
3758 	return 0;
3759 }
3760 
3761 /*
3762  * vmw_execbuf_fence_commands - create and submit a command stream fence
3763  *
3764  * Creates a fence object and submits a command stream marker.
3765  * If this fails for some reason, We sync the fifo and return NULL.
3766  * It is then safe to fence buffers with a NULL pointer.
3767  *
3768  * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
3769  * userspace handle if @p_handle is not NULL, otherwise not.
3770  */
3771 
3772 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3773 			       struct vmw_private *dev_priv,
3774 			       struct vmw_fence_obj **p_fence,
3775 			       uint32_t *p_handle)
3776 {
3777 	uint32_t sequence;
3778 	int ret;
3779 	bool synced = false;
3780 
3781 	/* p_handle implies file_priv. */
3782 	BUG_ON(p_handle != NULL && file_priv == NULL);
3783 
3784 	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3785 	if (unlikely(ret != 0)) {
3786 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3787 		synced = true;
3788 	}
3789 
3790 	if (p_handle != NULL)
3791 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3792 					    sequence, p_fence, p_handle);
3793 	else
3794 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3795 
3796 	if (unlikely(ret != 0 && !synced)) {
3797 		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3798 					 false, VMW_FENCE_WAIT_TIMEOUT);
3799 		*p_fence = NULL;
3800 	}
3801 
3802 	return ret;
3803 }
3804 
3805 /**
3806  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3807  *
3808  * @dev_priv: Pointer to a vmw_private struct.
3809  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3810  * @ret: Return value from fence object creation.
3811  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3812  * the information should be copied.
3813  * @fence: Pointer to the fenc object.
3814  * @fence_handle: User-space fence handle.
3815  * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3816  * @sync_file:  Only used to clean up in case of an error in this function.
3817  *
3818  * This function copies fence information to user-space. If copying fails, the
3819  * user-space struct drm_vmw_fence_rep::error member is hopefully left
3820  * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3821  * will hopefully be detected.
3822  *
3823  * Also if copying fails, user-space will be unable to signal the fence object
3824  * so we wait for it immediately, and then unreference the user-space reference.
3825  */
3826 void
3827 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3828 			    struct vmw_fpriv *vmw_fp, int ret,
3829 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3830 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3831 			    int32_t out_fence_fd, struct sync_file *sync_file)
3832 {
3833 	struct drm_vmw_fence_rep fence_rep;
3834 
3835 	if (user_fence_rep == NULL)
3836 		return;
3837 
3838 	memset(&fence_rep, 0, sizeof(fence_rep));
3839 
3840 	fence_rep.error = ret;
3841 	fence_rep.fd = out_fence_fd;
3842 	if (ret == 0) {
3843 		BUG_ON(fence == NULL);
3844 
3845 		fence_rep.handle = fence_handle;
3846 		fence_rep.seqno = fence->base.seqno;
3847 		vmw_update_seqno(dev_priv);
3848 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3849 	}
3850 
3851 	/*
3852 	 * copy_to_user errors will be detected by user space not seeing
3853 	 * fence_rep::error filled in. Typically user-space would have pre-set
3854 	 * that member to -EFAULT.
3855 	 */
3856 	ret = copy_to_user(user_fence_rep, &fence_rep,
3857 			   sizeof(fence_rep));
3858 
3859 	/*
3860 	 * User-space lost the fence object. We need to sync and unreference the
3861 	 * handle.
3862 	 */
3863 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3864 		if (sync_file)
3865 			fput(sync_file->file);
3866 
3867 		if (fence_rep.fd != -1) {
3868 			put_unused_fd(fence_rep.fd);
3869 			fence_rep.fd = -1;
3870 		}
3871 
3872 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3873 					  TTM_REF_USAGE);
3874 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3875 		(void) vmw_fence_obj_wait(fence, false, false,
3876 					  VMW_FENCE_WAIT_TIMEOUT);
3877 	}
3878 }
3879 
3880 /**
3881  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3882  *
3883  * @dev_priv: Pointer to a device private structure.
3884  * @kernel_commands: Pointer to the unpatched command batch.
3885  * @command_size: Size of the unpatched command batch.
3886  * @sw_context: Structure holding the relocation lists.
3887  *
3888  * Side effects: If this function returns 0, then the command batch pointed to
3889  * by @kernel_commands will have been modified.
3890  */
3891 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3892 				   void *kernel_commands, u32 command_size,
3893 				   struct vmw_sw_context *sw_context)
3894 {
3895 	void *cmd;
3896 
3897 	if (sw_context->dx_ctx_node)
3898 		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3899 					  sw_context->dx_ctx_node->ctx->id);
3900 	else
3901 		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3902 
3903 	if (!cmd)
3904 		return -ENOMEM;
3905 
3906 	vmw_apply_relocations(sw_context);
3907 	memcpy(cmd, kernel_commands, command_size);
3908 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3909 	vmw_resource_relocations_free(&sw_context->res_relocations);
3910 	vmw_cmd_commit(dev_priv, command_size);
3911 
3912 	return 0;
3913 }
3914 
3915 /**
3916  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3917  * command buffer manager.
3918  *
3919  * @dev_priv: Pointer to a device private structure.
3920  * @header: Opaque handle to the command buffer allocation.
3921  * @command_size: Size of the unpatched command batch.
3922  * @sw_context: Structure holding the relocation lists.
3923  *
3924  * Side effects: If this function returns 0, then the command buffer represented
3925  * by @header will have been modified.
3926  */
3927 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3928 				     struct vmw_cmdbuf_header *header,
3929 				     u32 command_size,
3930 				     struct vmw_sw_context *sw_context)
3931 {
3932 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3933 		  SVGA3D_INVALID_ID);
3934 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3935 				       header);
3936 
3937 	vmw_apply_relocations(sw_context);
3938 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3939 	vmw_resource_relocations_free(&sw_context->res_relocations);
3940 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3941 
3942 	return 0;
3943 }
3944 
3945 /**
3946  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3947  * submission using a command buffer.
3948  *
3949  * @dev_priv: Pointer to a device private structure.
3950  * @user_commands: User-space pointer to the commands to be submitted.
3951  * @command_size: Size of the unpatched command batch.
3952  * @header: Out parameter returning the opaque pointer to the command buffer.
3953  *
3954  * This function checks whether we can use the command buffer manager for
3955  * submission and if so, creates a command buffer of suitable size and copies
3956  * the user data into that buffer.
3957  *
3958  * On successful return, the function returns a pointer to the data in the
3959  * command buffer and *@header is set to non-NULL.
3960  *
3961  * @kernel_commands: If command buffers could not be used, the function will
3962  * return the value of @kernel_commands on function call. That value may be
3963  * NULL. In that case, the value of *@header will be set to NULL.
3964  *
3965  * If an error is encountered, the function will return a pointer error value.
3966  * If the function is interrupted by a signal while sleeping, it will return
3967  * -ERESTARTSYS casted to a pointer error value.
3968  */
3969 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3970 				void __user *user_commands,
3971 				void *kernel_commands, u32 command_size,
3972 				struct vmw_cmdbuf_header **header)
3973 {
3974 	size_t cmdbuf_size;
3975 	int ret;
3976 
3977 	*header = NULL;
3978 	if (command_size > SVGA_CB_MAX_SIZE) {
3979 		VMW_DEBUG_USER("Command buffer is too large.\n");
3980 		return ERR_PTR(-EINVAL);
3981 	}
3982 
3983 	if (!dev_priv->cman || kernel_commands)
3984 		return kernel_commands;
3985 
3986 	/* If possible, add a little space for fencing. */
3987 	cmdbuf_size = command_size + 512;
3988 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3989 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3990 					   header);
3991 	if (IS_ERR(kernel_commands))
3992 		return kernel_commands;
3993 
3994 	ret = copy_from_user(kernel_commands, user_commands, command_size);
3995 	if (ret) {
3996 		VMW_DEBUG_USER("Failed copying commands.\n");
3997 		vmw_cmdbuf_header_free(*header);
3998 		*header = NULL;
3999 		return ERR_PTR(-EFAULT);
4000 	}
4001 
4002 	return kernel_commands;
4003 }
4004 
4005 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4006 				   struct vmw_sw_context *sw_context,
4007 				   uint32_t handle)
4008 {
4009 	struct vmw_resource *res;
4010 	int ret;
4011 	unsigned int size;
4012 
4013 	if (handle == SVGA3D_INVALID_ID)
4014 		return 0;
4015 
4016 	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4017 	ret = vmw_validation_preload_res(sw_context->ctx, size);
4018 	if (ret)
4019 		return ret;
4020 
4021 	res = vmw_user_resource_noref_lookup_handle
4022 		(dev_priv, sw_context->fp->tfile, handle,
4023 		 user_context_converter);
4024 	if (IS_ERR(res)) {
4025 		VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
4026 			       (unsigned int) handle);
4027 		return PTR_ERR(res);
4028 	}
4029 
4030 	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
4031 	if (unlikely(ret != 0))
4032 		return ret;
4033 
4034 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4035 	sw_context->man = vmw_context_res_man(res);
4036 
4037 	return 0;
4038 }
4039 
4040 int vmw_execbuf_process(struct drm_file *file_priv,
4041 			struct vmw_private *dev_priv,
4042 			void __user *user_commands, void *kernel_commands,
4043 			uint32_t command_size, uint64_t throttle_us,
4044 			uint32_t dx_context_handle,
4045 			struct drm_vmw_fence_rep __user *user_fence_rep,
4046 			struct vmw_fence_obj **out_fence, uint32_t flags)
4047 {
4048 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4049 	struct vmw_fence_obj *fence = NULL;
4050 	struct vmw_cmdbuf_header *header;
4051 	uint32_t handle = 0;
4052 	int ret;
4053 	int32_t out_fence_fd = -1;
4054 	struct sync_file *sync_file = NULL;
4055 	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
4056 
4057 	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
4058 
4059 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4060 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4061 		if (out_fence_fd < 0) {
4062 			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4063 			return out_fence_fd;
4064 		}
4065 	}
4066 
4067 	if (throttle_us) {
4068 		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4069 	}
4070 
4071 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4072 					     kernel_commands, command_size,
4073 					     &header);
4074 	if (IS_ERR(kernel_commands)) {
4075 		ret = PTR_ERR(kernel_commands);
4076 		goto out_free_fence_fd;
4077 	}
4078 
4079 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4080 	if (ret) {
4081 		ret = -ERESTARTSYS;
4082 		goto out_free_header;
4083 	}
4084 
4085 	sw_context->kernel = false;
4086 	if (kernel_commands == NULL) {
4087 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4088 		if (unlikely(ret != 0))
4089 			goto out_unlock;
4090 
4091 		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4092 				     command_size);
4093 		if (unlikely(ret != 0)) {
4094 			ret = -EFAULT;
4095 			VMW_DEBUG_USER("Failed copying commands.\n");
4096 			goto out_unlock;
4097 		}
4098 
4099 		kernel_commands = sw_context->cmd_bounce;
4100 	} else if (!header) {
4101 		sw_context->kernel = true;
4102 	}
4103 
4104 	sw_context->fp = vmw_fpriv(file_priv);
4105 	INIT_LIST_HEAD(&sw_context->ctx_list);
4106 	sw_context->cur_query_bo = dev_priv->pinned_bo;
4107 	sw_context->last_query_ctx = NULL;
4108 	sw_context->needs_post_query_barrier = false;
4109 	sw_context->dx_ctx_node = NULL;
4110 	sw_context->dx_query_mob = NULL;
4111 	sw_context->dx_query_ctx = NULL;
4112 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4113 	INIT_LIST_HEAD(&sw_context->res_relocations);
4114 	INIT_LIST_HEAD(&sw_context->bo_relocations);
4115 
4116 	if (sw_context->staged_bindings)
4117 		vmw_binding_state_reset(sw_context->staged_bindings);
4118 
4119 	if (!sw_context->res_ht_initialized) {
4120 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4121 		if (unlikely(ret != 0))
4122 			goto out_unlock;
4123 
4124 		sw_context->res_ht_initialized = true;
4125 	}
4126 
4127 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4128 	sw_context->ctx = &val_ctx;
4129 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4130 	if (unlikely(ret != 0))
4131 		goto out_err_nores;
4132 
4133 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4134 				command_size);
4135 	if (unlikely(ret != 0))
4136 		goto out_err_nores;
4137 
4138 	ret = vmw_resources_reserve(sw_context);
4139 	if (unlikely(ret != 0))
4140 		goto out_err_nores;
4141 
4142 	ret = vmw_validation_bo_reserve(&val_ctx, true);
4143 	if (unlikely(ret != 0))
4144 		goto out_err_nores;
4145 
4146 	ret = vmw_validation_bo_validate(&val_ctx, true);
4147 	if (unlikely(ret != 0))
4148 		goto out_err;
4149 
4150 	ret = vmw_validation_res_validate(&val_ctx, true);
4151 	if (unlikely(ret != 0))
4152 		goto out_err;
4153 
4154 	vmw_validation_drop_ht(&val_ctx);
4155 
4156 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4157 	if (unlikely(ret != 0)) {
4158 		ret = -ERESTARTSYS;
4159 		goto out_err;
4160 	}
4161 
4162 	if (dev_priv->has_mob) {
4163 		ret = vmw_rebind_contexts(sw_context);
4164 		if (unlikely(ret != 0))
4165 			goto out_unlock_binding;
4166 	}
4167 
4168 	if (!header) {
4169 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4170 					      command_size, sw_context);
4171 	} else {
4172 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4173 						sw_context);
4174 		header = NULL;
4175 	}
4176 	mutex_unlock(&dev_priv->binding_mutex);
4177 	if (ret)
4178 		goto out_err;
4179 
4180 	vmw_query_bo_switch_commit(dev_priv, sw_context);
4181 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4182 					 (user_fence_rep) ? &handle : NULL);
4183 	/*
4184 	 * This error is harmless, because if fence submission fails,
4185 	 * vmw_fifo_send_fence will sync. The error will be propagated to
4186 	 * user-space in @fence_rep
4187 	 */
4188 	if (ret != 0)
4189 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4190 
4191 	vmw_execbuf_bindings_commit(sw_context, false);
4192 	vmw_bind_dx_query_mob(sw_context);
4193 	vmw_validation_res_unreserve(&val_ctx, false);
4194 
4195 	vmw_validation_bo_fence(sw_context->ctx, fence);
4196 
4197 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4198 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4199 
4200 	/*
4201 	 * If anything fails here, give up trying to export the fence and do a
4202 	 * sync since the user mode will not be able to sync the fence itself.
4203 	 * This ensures we are still functionally correct.
4204 	 */
4205 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4206 
4207 		sync_file = sync_file_create(&fence->base);
4208 		if (!sync_file) {
4209 			VMW_DEBUG_USER("Sync file create failed for fence\n");
4210 			put_unused_fd(out_fence_fd);
4211 			out_fence_fd = -1;
4212 
4213 			(void) vmw_fence_obj_wait(fence, false, false,
4214 						  VMW_FENCE_WAIT_TIMEOUT);
4215 		} else {
4216 			/* Link the fence with the FD created earlier */
4217 			fd_install(out_fence_fd, sync_file->file);
4218 		}
4219 	}
4220 
4221 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4222 				    user_fence_rep, fence, handle, out_fence_fd,
4223 				    sync_file);
4224 
4225 	/* Don't unreference when handing fence out */
4226 	if (unlikely(out_fence != NULL)) {
4227 		*out_fence = fence;
4228 		fence = NULL;
4229 	} else if (likely(fence != NULL)) {
4230 		vmw_fence_obj_unreference(&fence);
4231 	}
4232 
4233 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4234 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4235 
4236 	/*
4237 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4238 	 * in resource destruction paths.
4239 	 */
4240 	vmw_validation_unref_lists(&val_ctx);
4241 
4242 	return 0;
4243 
4244 out_unlock_binding:
4245 	mutex_unlock(&dev_priv->binding_mutex);
4246 out_err:
4247 	vmw_validation_bo_backoff(&val_ctx);
4248 out_err_nores:
4249 	vmw_execbuf_bindings_commit(sw_context, true);
4250 	vmw_validation_res_unreserve(&val_ctx, true);
4251 	vmw_resource_relocations_free(&sw_context->res_relocations);
4252 	vmw_free_relocations(sw_context);
4253 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4254 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4255 out_unlock:
4256 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4257 	vmw_validation_drop_ht(&val_ctx);
4258 	WARN_ON(!list_empty(&sw_context->ctx_list));
4259 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4260 
4261 	/*
4262 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4263 	 * in resource destruction paths.
4264 	 */
4265 	vmw_validation_unref_lists(&val_ctx);
4266 out_free_header:
4267 	if (header)
4268 		vmw_cmdbuf_header_free(header);
4269 out_free_fence_fd:
4270 	if (out_fence_fd >= 0)
4271 		put_unused_fd(out_fence_fd);
4272 
4273 	return ret;
4274 }
4275 
4276 /**
4277  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4278  *
4279  * @dev_priv: The device private structure.
4280  *
4281  * This function is called to idle the fifo and unpin the query buffer if the
4282  * normal way to do this hits an error, which should typically be extremely
4283  * rare.
4284  */
4285 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4286 {
4287 	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4288 
4289 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4290 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4291 	if (dev_priv->dummy_query_bo_pinned) {
4292 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4293 		dev_priv->dummy_query_bo_pinned = false;
4294 	}
4295 }
4296 
4297 
4298 /**
4299  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4300  * bo.
4301  *
4302  * @dev_priv: The device private structure.
4303  * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4304  * query barrier that flushes all queries touching the current buffer pointed to
4305  * by @dev_priv->pinned_bo
4306  *
4307  * This function should be used to unpin the pinned query bo, or as a query
4308  * barrier when we need to make sure that all queries have finished before the
4309  * next fifo command. (For example on hardware context destructions where the
4310  * hardware may otherwise leak unfinished queries).
4311  *
4312  * This function does not return any failure codes, but make attempts to do safe
4313  * unpinning in case of errors.
4314  *
4315  * The function will synchronize on the previous query barrier, and will thus
4316  * not finish until that barrier has executed.
4317  *
4318  * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4319  * calling this function.
4320  */
4321 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4322 				     struct vmw_fence_obj *fence)
4323 {
4324 	int ret = 0;
4325 	struct vmw_fence_obj *lfence = NULL;
4326 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4327 
4328 	if (dev_priv->pinned_bo == NULL)
4329 		goto out_unlock;
4330 
4331 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4332 				    false);
4333 	if (ret)
4334 		goto out_no_reserve;
4335 
4336 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4337 				    false);
4338 	if (ret)
4339 		goto out_no_reserve;
4340 
4341 	ret = vmw_validation_bo_reserve(&val_ctx, false);
4342 	if (ret)
4343 		goto out_no_reserve;
4344 
4345 	if (dev_priv->query_cid_valid) {
4346 		BUG_ON(fence != NULL);
4347 		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4348 		if (ret)
4349 			goto out_no_emit;
4350 		dev_priv->query_cid_valid = false;
4351 	}
4352 
4353 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4354 	if (dev_priv->dummy_query_bo_pinned) {
4355 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4356 		dev_priv->dummy_query_bo_pinned = false;
4357 	}
4358 	if (fence == NULL) {
4359 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4360 						  NULL);
4361 		fence = lfence;
4362 	}
4363 	vmw_validation_bo_fence(&val_ctx, fence);
4364 	if (lfence != NULL)
4365 		vmw_fence_obj_unreference(&lfence);
4366 
4367 	vmw_validation_unref_lists(&val_ctx);
4368 	vmw_bo_unreference(&dev_priv->pinned_bo);
4369 
4370 out_unlock:
4371 	return;
4372 out_no_emit:
4373 	vmw_validation_bo_backoff(&val_ctx);
4374 out_no_reserve:
4375 	vmw_validation_unref_lists(&val_ctx);
4376 	vmw_execbuf_unpin_panic(dev_priv);
4377 	vmw_bo_unreference(&dev_priv->pinned_bo);
4378 }
4379 
4380 /**
4381  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4382  *
4383  * @dev_priv: The device private structure.
4384  *
4385  * This function should be used to unpin the pinned query bo, or as a query
4386  * barrier when we need to make sure that all queries have finished before the
4387  * next fifo command. (For example on hardware context destructions where the
4388  * hardware may otherwise leak unfinished queries).
4389  *
4390  * This function does not return any failure codes, but make attempts to do safe
4391  * unpinning in case of errors.
4392  *
4393  * The function will synchronize on the previous query barrier, and will thus
4394  * not finish until that barrier has executed.
4395  */
4396 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4397 {
4398 	mutex_lock(&dev_priv->cmdbuf_mutex);
4399 	if (dev_priv->query_cid_valid)
4400 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4401 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4402 }
4403 
4404 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4405 		      struct drm_file *file_priv)
4406 {
4407 	struct vmw_private *dev_priv = vmw_priv(dev);
4408 	struct drm_vmw_execbuf_arg *arg = data;
4409 	int ret;
4410 	struct dma_fence *in_fence = NULL;
4411 
4412 	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4413 	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4414 
4415 	/*
4416 	 * Extend the ioctl argument while maintaining backwards compatibility:
4417 	 * We take different code paths depending on the value of arg->version.
4418 	 *
4419 	 * Note: The ioctl argument is extended and zeropadded by core DRM.
4420 	 */
4421 	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4422 		     arg->version == 0)) {
4423 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4424 		ret = -EINVAL;
4425 		goto mksstats_out;
4426 	}
4427 
4428 	switch (arg->version) {
4429 	case 1:
4430 		/* For v1 core DRM have extended + zeropadded the data */
4431 		arg->context_handle = (uint32_t) -1;
4432 		break;
4433 	case 2:
4434 	default:
4435 		/* For v2 and later core DRM would have correctly copied it */
4436 		break;
4437 	}
4438 
4439 	/* If imported a fence FD from elsewhere, then wait on it */
4440 	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4441 		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4442 
4443 		if (!in_fence) {
4444 			VMW_DEBUG_USER("Cannot get imported fence\n");
4445 			ret = -EINVAL;
4446 			goto mksstats_out;
4447 		}
4448 
4449 		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4450 		if (ret)
4451 			goto out;
4452 	}
4453 
4454 	ret = vmw_execbuf_process(file_priv, dev_priv,
4455 				  (void __user *)(unsigned long)arg->commands,
4456 				  NULL, arg->command_size, arg->throttle_us,
4457 				  arg->context_handle,
4458 				  (void __user *)(unsigned long)arg->fence_rep,
4459 				  NULL, arg->flags);
4460 
4461 	if (unlikely(ret != 0))
4462 		goto out;
4463 
4464 	vmw_kms_cursor_post_execbuf(dev_priv);
4465 
4466 out:
4467 	if (in_fence)
4468 		dma_fence_put(in_fence);
4469 
4470 mksstats_out:
4471 	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4472 	return ret;
4473 }
4474