1 /**************************************************************************
2  *
3  * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32 #include "vmwgfx_so.h"
33 #include "vmwgfx_binding.h"
34 
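/*
 * Order (log2 of the number of hash buckets) of the hash table used to map
 * resources and buffer objects to their entries on the validation lists.
 */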
35 #define VMW_RES_HT_ORDER 12
36 
37 /**
38  * enum vmw_resource_relocation_type - Relocation type for resources
39  *
40  * @vmw_res_rel_normal: Traditional relocation. The resource id in the
41  * command stream is replaced with the actual id after validation.
42  * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
43  * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking.
 */
48 enum vmw_resource_relocation_type {
49 	vmw_res_rel_normal,
50 	vmw_res_rel_nop,
51 	vmw_res_rel_cond_nop,
52 	vmw_res_rel_max
53 };
54 
55 /**
56  * struct vmw_resource_relocation - Relocation info for resources
57  *
58  * @head: List head for the software context's relocation list.
59  * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in bytes, into the command buffer where the id that
 * needs fixup is located.
62  * @rel_type: Type of relocation.
63  */
64 struct vmw_resource_relocation {
65 	struct list_head head;
66 	const struct vmw_resource *res;
67 	u32 offset:29;
68 	enum vmw_resource_relocation_type rel_type:3;
69 };
70 
71 /**
72  * struct vmw_resource_val_node - Validation info for resources
73  *
74  * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
76  * @res: Ref-counted pointer to the resource.
78  * @new_backup: Refcounted pointer to the new backup buffer.
79  * @staged_bindings: If @res is a context, tracks bindings set up during
80  * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
82  * @first_usage: Set to true the first time the resource is referenced in
83  * the command stream.
84  * @switching_backup: The command stream provides a new backup buffer for a
85  * resource.
86  * @no_buffer_needed: This means @switching_backup is true on first buffer
87  * reference. So resource reservation does not need to allocate a backup
88  * buffer for the resource.
89  */
90 struct vmw_resource_val_node {
91 	struct list_head head;
92 	struct drm_hash_item hash;
93 	struct vmw_resource *res;
94 	struct vmw_dma_buffer *new_backup;
95 	struct vmw_ctx_binding_state *staged_bindings;
96 	unsigned long new_backup_offset;
97 	u32 first_usage : 1;
98 	u32 switching_backup : 1;
99 	u32 no_buffer_needed : 1;
100 };
101 
102 /**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled only if guest-backed objects are available.
 */
109 struct vmw_cmd_entry {
110 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
111 		     SVGA3dCmdHeader *);
112 	bool user_allow;
113 	bool gb_disable;
114 	bool gb_enable;
115 };
116 
117 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
118 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
119 				       (_gb_disable), (_gb_enable)}
120 
121 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
122 					struct vmw_sw_context *sw_context,
123 					struct vmw_resource *ctx);
124 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
125 				 struct vmw_sw_context *sw_context,
126 				 SVGAMobId *id,
127 				 struct vmw_dma_buffer **vmw_bo_p);
128 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
129 				   struct vmw_dma_buffer *vbo,
130 				   bool validate_as_mob,
131 				   uint32_t *p_val_node);
132 /**
133  * vmw_ptr_diff - Compute the offset from a to b in bytes
134  *
135  * @a: A starting pointer.
136  * @b: A pointer offset in the same address space.
137  *
138  * Returns: The offset in bytes between the two pointers.
139  */
140 static size_t vmw_ptr_diff(void *a, void *b)
141 {
142 	return (unsigned long) b - (unsigned long) a;
143 }
144 
145 /**
146  * vmw_resources_unreserve - unreserve resources previously reserved for
147  * command submission.
148  *
149  * @sw_context: pointer to the software context
150  * @backoff: Whether command submission failed.
151  */
152 static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
153 				    bool backoff)
154 {
155 	struct vmw_resource_val_node *val;
156 	struct list_head *list = &sw_context->resource_list;
157 
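	/*
	 * If the command stream provided a new DX query MOB and submission
	 * succeeded, make the query binding persistent on the context.
	 */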
158 	if (sw_context->dx_query_mob && !backoff)
159 		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
160 					  sw_context->dx_query_mob);
161 
162 	list_for_each_entry(val, list, head) {
163 		struct vmw_resource *res = val->res;
164 		bool switch_backup =
165 			(backoff) ? false : val->switching_backup;
166 
167 		/*
168 		 * Transfer staged context bindings to the
169 		 * persistent context binding tracker.
170 		 */
171 		if (unlikely(val->staged_bindings)) {
172 			if (!backoff) {
173 				vmw_binding_state_commit
174 					(vmw_context_binding_state(val->res),
175 					 val->staged_bindings);
176 			}
177 
178 			if (val->staged_bindings != sw_context->staged_bindings)
179 				vmw_binding_state_free(val->staged_bindings);
180 			else
181 				sw_context->staged_bindings_inuse = false;
182 			val->staged_bindings = NULL;
183 		}
184 		vmw_resource_unreserve(res, switch_backup, val->new_backup,
185 				       val->new_backup_offset);
186 		vmw_dmabuf_unreference(&val->new_backup);
187 	}
188 }
189 
190 /**
191  * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
192  * added to the validate list.
193  *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
196  * @node: The validation node holding this context.
197  */
198 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
199 				   struct vmw_sw_context *sw_context,
200 				   struct vmw_resource_val_node *node)
201 {
202 	int ret;
203 
204 	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
205 	if (unlikely(ret != 0))
206 		goto out_err;
207 
208 	if (!sw_context->staged_bindings) {
209 		sw_context->staged_bindings =
210 			vmw_binding_state_alloc(dev_priv);
211 		if (IS_ERR(sw_context->staged_bindings)) {
212 			DRM_ERROR("Failed to allocate context binding "
213 				  "information.\n");
214 			ret = PTR_ERR(sw_context->staged_bindings);
215 			sw_context->staged_bindings = NULL;
216 			goto out_err;
217 		}
218 	}
219 
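	/*
	 * The software context owns one pre-allocated binding-state tracker.
	 * The first context in the batch uses it; any further contexts get a
	 * tracker of their own.
	 */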
220 	if (sw_context->staged_bindings_inuse) {
221 		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
222 		if (IS_ERR(node->staged_bindings)) {
223 			DRM_ERROR("Failed to allocate context binding "
224 				  "information.\n");
225 			ret = PTR_ERR(node->staged_bindings);
226 			node->staged_bindings = NULL;
227 			goto out_err;
228 		}
229 	} else {
230 		node->staged_bindings = sw_context->staged_bindings;
231 		sw_context->staged_bindings_inuse = true;
232 	}
233 
234 	return 0;
235 out_err:
236 	return ret;
237 }
238 
239 /**
240  * vmw_resource_val_add - Add a resource to the software context's
241  * resource list if it's not already on it.
242  *
243  * @sw_context: Pointer to the software context.
244  * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
247  */
248 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
249 				struct vmw_resource *res,
250 				struct vmw_resource_val_node **p_node)
251 {
252 	struct vmw_private *dev_priv = res->dev_priv;
253 	struct vmw_resource_val_node *node;
254 	struct drm_hash_item *hash;
255 	int ret;
256 
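	/*
	 * If the resource is already on the validation list, just note the
	 * repeated usage and return the existing node.
	 */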
257 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
258 				    &hash) == 0)) {
259 		node = container_of(hash, struct vmw_resource_val_node, hash);
260 		node->first_usage = false;
261 		if (unlikely(p_node != NULL))
262 			*p_node = node;
263 		return 0;
264 	}
265 
266 	node = kzalloc(sizeof(*node), GFP_KERNEL);
267 	if (unlikely(!node)) {
268 		DRM_ERROR("Failed to allocate a resource validation "
269 			  "entry.\n");
270 		return -ENOMEM;
271 	}
272 
273 	node->hash.key = (unsigned long) res;
274 	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
275 	if (unlikely(ret != 0)) {
276 		DRM_ERROR("Failed to initialize a resource validation "
277 			  "entry.\n");
278 		kfree(node);
279 		return ret;
280 	}
281 	node->res = vmw_resource_reference(res);
282 	node->first_usage = true;
283 	if (unlikely(p_node != NULL))
284 		*p_node = node;
285 
286 	if (!dev_priv->has_mob) {
287 		list_add_tail(&node->head, &sw_context->resource_list);
288 		return 0;
289 	}
290 
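	/*
	 * Contexts are added to the front and cotables to the tail of the
	 * separate per-submission context resource list, so that they can be
	 * processed ahead of the resources that depend on them.
	 */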
291 	switch (vmw_res_type(res)) {
292 	case vmw_res_context:
293 	case vmw_res_dx_context:
294 		list_add(&node->head, &sw_context->ctx_resource_list);
295 		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
296 		break;
297 	case vmw_res_cotable:
298 		list_add_tail(&node->head, &sw_context->ctx_resource_list);
299 		break;
300 	default:
301 		list_add_tail(&node->head, &sw_context->resource_list);
302 		break;
303 	}
304 
305 	return ret;
306 }
307 
308 /**
 * vmw_view_res_val_add - Add a view, and the surface it points to,
 * to the validation list
311  *
312  * @sw_context: The software context holding the validation list.
313  * @view: Pointer to the view resource.
314  *
 * Returns 0 on success, negative error code otherwise.
316  */
317 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
318 				struct vmw_resource *view)
319 {
320 	int ret;
321 
322 	/*
323 	 * First add the resource the view is pointing to, otherwise
324 	 * it may be swapped out when the view is validated.
325 	 */
326 	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
327 	if (ret)
328 		return ret;
329 
330 	return vmw_resource_val_add(sw_context, view, NULL);
331 }
332 
333 /**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it
 * points to, to the validation list.
336  *
337  * @sw_context: The software context holding the validation list.
338  * @view_type: The view type to look up.
339  * @id: view id of the view.
340  *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
344  */
345 static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
346 			       enum vmw_view_type view_type, u32 id)
347 {
348 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
349 	struct vmw_resource *view;
350 	int ret;
351 
352 	if (!ctx_node) {
353 		DRM_ERROR("DX Context not set.\n");
354 		return -EINVAL;
355 	}
356 
357 	view = vmw_view_lookup(sw_context->man, view_type, id);
358 	if (IS_ERR(view))
359 		return PTR_ERR(view);
360 
361 	ret = vmw_view_res_val_add(sw_context, view);
362 	vmw_resource_unreference(&view);
363 
364 	return ret;
365 }
366 
367 /**
368  * vmw_resource_context_res_add - Put resources previously bound to a context on
369  * the validation list
370  *
371  * @dev_priv: Pointer to a device private structure
372  * @sw_context: Pointer to a software context used for this command submission
373  * @ctx: Pointer to the context resource
374  *
375  * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
377  */
378 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
379 					struct vmw_sw_context *sw_context,
380 					struct vmw_resource *ctx)
381 {
382 	struct list_head *binding_list;
383 	struct vmw_ctx_bindinfo *entry;
384 	int ret = 0;
385 	struct vmw_resource *res;
386 	u32 i;
387 
388 	/* Add all cotables to the validation list. */
389 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
390 		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
391 			res = vmw_context_cotable(ctx, i);
392 			if (IS_ERR(res))
393 				continue;
394 
395 			ret = vmw_resource_val_add(sw_context, res, NULL);
396 			vmw_resource_unreference(&res);
397 			if (unlikely(ret != 0))
398 				return ret;
399 		}
400 	}
401 
402 
403 	/* Add all resources bound to the context to the validation list */
404 	mutex_lock(&dev_priv->binding_mutex);
405 	binding_list = vmw_context_binding_list(ctx);
406 
407 	list_for_each_entry(entry, binding_list, ctx_list) {
408 		/* entry->res is not refcounted */
409 		res = vmw_resource_reference_unless_doomed(entry->res);
410 		if (unlikely(res == NULL))
411 			continue;
412 
413 		if (vmw_res_type(entry->res) == vmw_res_view)
414 			ret = vmw_view_res_val_add(sw_context, entry->res);
415 		else
416 			ret = vmw_resource_val_add(sw_context, entry->res,
417 						   NULL);
418 		vmw_resource_unreference(&res);
419 		if (unlikely(ret != 0))
420 			break;
421 	}
422 
423 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
424 		struct vmw_dma_buffer *dx_query_mob;
425 
426 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
427 		if (dx_query_mob)
428 			ret = vmw_bo_to_validate_list(sw_context,
429 						      dx_query_mob,
430 						      true, NULL);
431 	}
432 
433 	mutex_unlock(&dev_priv->binding_mutex);
434 	return ret;
435 }
436 
437 /**
438  * vmw_resource_relocation_add - Add a relocation to the relocation list
439  *
440  * @list: Pointer to head of relocation list.
441  * @res: The resource.
442  * @offset: Offset into the command buffer currently being parsed where the
443  * id that needs fixup is located. Granularity is one byte.
444  * @rel_type: Relocation type.
445  */
446 static int vmw_resource_relocation_add(struct list_head *list,
447 				       const struct vmw_resource *res,
448 				       unsigned long offset,
449 				       enum vmw_resource_relocation_type
450 				       rel_type)
451 {
452 	struct vmw_resource_relocation *rel;
453 
454 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
455 	if (unlikely(!rel)) {
456 		DRM_ERROR("Failed to allocate a resource relocation.\n");
457 		return -ENOMEM;
458 	}
459 
460 	rel->res = res;
461 	rel->offset = offset;
462 	rel->rel_type = rel_type;
463 	list_add_tail(&rel->head, list);
464 
465 	return 0;
466 }
467 
468 /**
469  * vmw_resource_relocations_free - Free all relocations on a list
470  *
471  * @list: Pointer to the head of the relocation list.
472  */
473 static void vmw_resource_relocations_free(struct list_head *list)
474 {
475 	struct vmw_resource_relocation *rel, *n;
476 
477 	list_for_each_entry_safe(rel, n, list, head) {
478 		list_del(&rel->head);
479 		kfree(rel);
480 	}
481 }
482 
483 /**
484  * vmw_resource_relocations_apply - Apply all relocations on a list
485  *
 * @cb: Pointer to the start of the command buffer being patched. This need
487  * not be the same buffer as the one being parsed when the relocation
488  * list was built, but the contents must be the same modulo the
489  * resource ids.
490  * @list: Pointer to the head of the relocation list.
491  */
492 static void vmw_resource_relocations_apply(uint32_t *cb,
493 					   struct list_head *list)
494 {
495 	struct vmw_resource_relocation *rel;
496 
497 	/* Validate the struct vmw_resource_relocation member size */
498 	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
499 	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
500 
501 	list_for_each_entry(rel, list, head) {
502 		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
503 		switch (rel->rel_type) {
504 		case vmw_res_rel_normal:
505 			*addr = rel->res->id;
506 			break;
507 		case vmw_res_rel_nop:
508 			*addr = SVGA_3D_CMD_NOP;
509 			break;
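		/*
		 * vmw_res_rel_cond_nop: Replace the command with a NOP only
		 * if the resource didn't get a valid device id.
		 */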
510 		default:
511 			if (rel->res->id == -1)
512 				*addr = SVGA_3D_CMD_NOP;
513 			break;
514 		}
515 	}
516 }
517 
518 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
519 			   struct vmw_sw_context *sw_context,
520 			   SVGA3dCmdHeader *header)
521 {
522 	return -EINVAL;
523 }
524 
525 static int vmw_cmd_ok(struct vmw_private *dev_priv,
526 		      struct vmw_sw_context *sw_context,
527 		      SVGA3dCmdHeader *header)
528 {
529 	return 0;
530 }
531 
532 /**
533  * vmw_bo_to_validate_list - add a bo to a validate list
534  *
535  * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
537  * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
539  * on return.
540  *
541  * Returns -EINVAL if the limit of number of buffer objects per command
542  * submission is reached.
543  */
544 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
545 				   struct vmw_dma_buffer *vbo,
546 				   bool validate_as_mob,
547 				   uint32_t *p_val_node)
548 {
549 	uint32_t val_node;
550 	struct vmw_validate_buffer *vval_buf;
551 	struct ttm_validate_buffer *val_buf;
552 	struct drm_hash_item *hash;
553 	int ret;
554 
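	/*
	 * Reuse an existing validation entry if this buffer object has
	 * already been added; its MOB / non-MOB usage must then match.
	 */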
555 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
556 				    &hash) == 0)) {
557 		vval_buf = container_of(hash, struct vmw_validate_buffer,
558 					hash);
559 		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
560 			DRM_ERROR("Inconsistent buffer usage.\n");
561 			return -EINVAL;
562 		}
563 		val_buf = &vval_buf->base;
564 		val_node = vval_buf - sw_context->val_bufs;
565 	} else {
566 		val_node = sw_context->cur_val_buf;
567 		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
568 			DRM_ERROR("Max number of DMA buffers per submission "
569 				  "exceeded.\n");
570 			return -EINVAL;
571 		}
572 		vval_buf = &sw_context->val_bufs[val_node];
573 		vval_buf->hash.key = (unsigned long) vbo;
574 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
575 		if (unlikely(ret != 0)) {
576 			DRM_ERROR("Failed to initialize a buffer validation "
577 				  "entry.\n");
578 			return ret;
579 		}
580 		++sw_context->cur_val_buf;
581 		val_buf = &vval_buf->base;
582 		val_buf->bo = ttm_bo_reference(&vbo->base);
583 		val_buf->shared = false;
584 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
585 		vval_buf->validate_as_mob = validate_as_mob;
586 	}
587 
588 	if (p_val_node)
589 		*p_val_node = val_node;
590 
591 	return 0;
592 }
593 
594 /**
595  * vmw_resources_reserve - Reserve all resources on the sw_context's
596  * resource list.
597  *
598  * @sw_context: Pointer to the software context.
599  *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at a time will attempt this.
603  */
604 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
605 {
606 	struct vmw_resource_val_node *val;
607 	int ret = 0;
608 
609 	list_for_each_entry(val, &sw_context->resource_list, head) {
610 		struct vmw_resource *res = val->res;
611 
612 		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
613 		if (unlikely(ret != 0))
614 			return ret;
615 
616 		if (res->backup) {
617 			struct vmw_dma_buffer *vbo = res->backup;
618 
619 			ret = vmw_bo_to_validate_list
620 				(sw_context, vbo,
621 				 vmw_resource_needs_backup(res), NULL);
622 
623 			if (unlikely(ret != 0))
624 				return ret;
625 		}
626 	}
627 
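	/*
	 * A DX query MOB set up by this command stream must match the one
	 * already expected by the context, if any.
	 */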
628 	if (sw_context->dx_query_mob) {
629 		struct vmw_dma_buffer *expected_dx_query_mob;
630 
631 		expected_dx_query_mob =
632 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
633 		if (expected_dx_query_mob &&
634 		    expected_dx_query_mob != sw_context->dx_query_mob) {
635 			ret = -EINVAL;
636 		}
637 	}
638 
639 	return ret;
640 }
641 
642 /**
643  * vmw_resources_validate - Validate all resources on the sw_context's
644  * resource list.
645  *
646  * @sw_context: Pointer to the software context.
647  *
648  * Before this function is called, all resource backup buffers must have
649  * been validated.
650  */
651 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
652 {
653 	struct vmw_resource_val_node *val;
654 	int ret;
655 
656 	list_for_each_entry(val, &sw_context->resource_list, head) {
657 		struct vmw_resource *res = val->res;
658 		struct vmw_dma_buffer *backup = res->backup;
659 
660 		ret = vmw_resource_validate(res);
661 		if (unlikely(ret != 0)) {
662 			if (ret != -ERESTARTSYS)
663 				DRM_ERROR("Failed to validate resource.\n");
664 			return ret;
665 		}
666 
667 		/* Check if the resource switched backup buffer */
668 		if (backup && res->backup && (backup != res->backup)) {
669 			struct vmw_dma_buffer *vbo = res->backup;
670 
671 			ret = vmw_bo_to_validate_list
672 				(sw_context, vbo,
673 				 vmw_resource_needs_backup(res), NULL);
674 			if (ret) {
675 				ttm_bo_unreserve(&vbo->base);
676 				return ret;
677 			}
678 		}
679 	}
680 	return 0;
681 }
682 
683 /**
684  * vmw_cmd_res_reloc_add - Add a resource to a software context's
685  * relocation- and validation lists.
686  *
687  * @dev_priv: Pointer to a struct vmw_private identifying the device.
688  * @sw_context: Pointer to the software context.
689  * @id_loc: Pointer to where the id that needs translation is located.
690  * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
692  * used for this resource is returned here.
693  */
694 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
695 				 struct vmw_sw_context *sw_context,
696 				 uint32_t *id_loc,
697 				 struct vmw_resource *res,
698 				 struct vmw_resource_val_node **p_val)
699 {
700 	int ret;
701 	struct vmw_resource_val_node *node;
702 
	if (p_val)
		*p_val = NULL;
704 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
705 					  res,
706 					  vmw_ptr_diff(sw_context->buf_start,
707 						       id_loc),
708 					  vmw_res_rel_normal);
709 	if (unlikely(ret != 0))
710 		return ret;
711 
712 	ret = vmw_resource_val_add(sw_context, res, &node);
713 	if (unlikely(ret != 0))
714 		return ret;
715 
716 	if (p_val)
717 		*p_val = node;
718 
719 	return 0;
720 }
721 
722 
723 /**
724  * vmw_cmd_res_check - Check that a resource is present and if so, put it
725  * on the resource validate list unless it's already there.
726  *
727  * @dev_priv: Pointer to a device private structure.
728  * @sw_context: Pointer to the software context.
729  * @res_type: Resource type.
 * @converter: User-space visible type specific information.
731  * @id_loc: Pointer to the location in the command buffer currently being
732  * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
734  * on exit.
735  */
736 static int
737 vmw_cmd_res_check(struct vmw_private *dev_priv,
738 		  struct vmw_sw_context *sw_context,
739 		  enum vmw_res_type res_type,
740 		  const struct vmw_user_resource_conv *converter,
741 		  uint32_t *id_loc,
742 		  struct vmw_resource_val_node **p_val)
743 {
744 	struct vmw_res_cache_entry *rcache =
745 		&sw_context->res_cache[res_type];
746 	struct vmw_resource *res;
747 	struct vmw_resource_val_node *node;
748 	int ret;
749 
750 	if (*id_loc == SVGA3D_INVALID_ID) {
751 		if (p_val)
752 			*p_val = NULL;
753 		if (res_type == vmw_res_context) {
754 			DRM_ERROR("Illegal context invalid id.\n");
755 			return -EINVAL;
756 		}
757 		return 0;
758 	}
759 
760 	/*
761 	 * Fastpath in case of repeated commands referencing the same
762 	 * resource
763 	 */
764 
765 	if (likely(rcache->valid && *id_loc == rcache->handle)) {
766 		const struct vmw_resource *res = rcache->res;
767 
768 		rcache->node->first_usage = false;
769 		if (p_val)
770 			*p_val = rcache->node;
771 
772 		return vmw_resource_relocation_add
773 			(&sw_context->res_relocations, res,
774 			 vmw_ptr_diff(sw_context->buf_start, id_loc),
775 			 vmw_res_rel_normal);
776 	}
777 
778 	ret = vmw_user_resource_lookup_handle(dev_priv,
779 					      sw_context->fp->tfile,
780 					      *id_loc,
781 					      converter,
782 					      &res);
783 	if (unlikely(ret != 0)) {
784 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
785 			  (unsigned) *id_loc);
786 		dump_stack();
787 		return ret;
788 	}
789 
790 	rcache->valid = true;
791 	rcache->res = res;
792 	rcache->handle = *id_loc;
793 
794 	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
795 				    res, &node);
796 	if (unlikely(ret != 0))
797 		goto out_no_reloc;
798 
799 	rcache->node = node;
800 	if (p_val)
801 		*p_val = node;
802 	vmw_resource_unreference(&res);
803 	return 0;
804 
805 out_no_reloc:
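	/*
	 * Transfer the lookup reference to error_resource; it is released
	 * by the execbuf error handling.
	 */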
806 	BUG_ON(sw_context->error_resource != NULL);
807 	sw_context->error_resource = res;
808 
809 	return ret;
810 }
811 
812 /**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
814  *
815  * @ctx_res: context the query belongs to
816  *
817  * This function assumes binding_mutex is held.
818  */
819 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
820 {
821 	struct vmw_private *dev_priv = ctx_res->dev_priv;
822 	struct vmw_dma_buffer *dx_query_mob;
823 	struct {
824 		SVGA3dCmdHeader header;
825 		SVGA3dCmdDXBindAllQuery body;
826 	} *cmd;
827 
828 
829 	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
830 
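	/* Nothing to rebind if there is no query MOB, or it is still bound. */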
831 	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
832 		return 0;
833 
834 	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
835 
836 	if (cmd == NULL) {
837 		DRM_ERROR("Failed to rebind queries.\n");
838 		return -ENOMEM;
839 	}
840 
841 	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
842 	cmd->header.size = sizeof(cmd->body);
843 	cmd->body.cid = ctx_res->id;
844 	cmd->body.mobid = dx_query_mob->base.mem.start;
845 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
846 
847 	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
848 
849 	return 0;
850 }
851 
852 /**
853  * vmw_rebind_contexts - Rebind all resources previously bound to
854  * referenced contexts.
855  *
856  * @sw_context: Pointer to the software context.
857  *
858  * Rebind context binding points that have been scrubbed because of eviction.
859  */
860 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
861 {
862 	struct vmw_resource_val_node *val;
863 	int ret;
864 
865 	list_for_each_entry(val, &sw_context->resource_list, head) {
866 		if (unlikely(!val->staged_bindings))
867 			break;
868 
869 		ret = vmw_binding_rebind_all
870 			(vmw_context_binding_state(val->res));
871 		if (unlikely(ret != 0)) {
872 			if (ret != -ERESTARTSYS)
873 				DRM_ERROR("Failed to rebind context.\n");
874 			return ret;
875 		}
876 
877 		ret = vmw_rebind_all_dx_query(val->res);
878 		if (ret != 0)
879 			return ret;
880 	}
881 
882 	return 0;
883 }
884 
885 /**
886  * vmw_view_bindings_add - Add an array of view bindings to a context
887  * binding state tracker.
888  *
889  * @sw_context: The execbuf state used for this command.
890  * @view_type: View type for the bindings.
891  * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
893  * @view_ids: Array of view ids to be bound.
894  * @num_views: Number of view ids in @view_ids.
895  * @first_slot: The binding slot to be used for the first view id in @view_ids.
896  */
897 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
898 				 enum vmw_view_type view_type,
899 				 enum vmw_ctx_binding_type binding_type,
900 				 uint32 shader_slot,
901 				 uint32 view_ids[], u32 num_views,
902 				 u32 first_slot)
903 {
904 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
905 	struct vmw_cmdbuf_res_manager *man;
906 	u32 i;
907 	int ret;
908 
909 	if (!ctx_node) {
910 		DRM_ERROR("DX Context not set.\n");
911 		return -EINVAL;
912 	}
913 
914 	man = sw_context->man;
915 	for (i = 0; i < num_views; ++i) {
916 		struct vmw_ctx_bindinfo_view binding;
917 		struct vmw_resource *view = NULL;
918 
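		/* SVGA3D_INVALID_ID leaves the slot unbound (NULL resource). */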
919 		if (view_ids[i] != SVGA3D_INVALID_ID) {
920 			view = vmw_view_lookup(man, view_type, view_ids[i]);
921 			if (IS_ERR(view)) {
922 				DRM_ERROR("View not found.\n");
923 				return PTR_ERR(view);
924 			}
925 
926 			ret = vmw_view_res_val_add(sw_context, view);
927 			if (ret) {
928 				DRM_ERROR("Could not add view to "
929 					  "validation list.\n");
930 				vmw_resource_unreference(&view);
931 				return ret;
932 			}
933 		}
934 		binding.bi.ctx = ctx_node->res;
935 		binding.bi.res = view;
936 		binding.bi.bt = binding_type;
937 		binding.shader_slot = shader_slot;
938 		binding.slot = first_slot + i;
939 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
940 				shader_slot, binding.slot);
941 		if (view)
942 			vmw_resource_unreference(&view);
943 	}
944 
945 	return 0;
946 }
947 
948 /**
949  * vmw_cmd_cid_check - Check a command header for valid context information.
950  *
951  * @dev_priv: Pointer to a device private structure.
952  * @sw_context: Pointer to the software context.
953  * @header: A command header with an embedded user-space context handle.
954  *
955  * Convenience function: Call vmw_cmd_res_check with the user-space context
956  * handle embedded in @header.
957  */
958 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
959 			     struct vmw_sw_context *sw_context,
960 			     SVGA3dCmdHeader *header)
961 {
962 	struct vmw_cid_cmd {
963 		SVGA3dCmdHeader header;
964 		uint32_t cid;
965 	} *cmd;
966 
967 	cmd = container_of(header, struct vmw_cid_cmd, header);
968 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
969 				 user_context_converter, &cmd->cid, NULL);
970 }
971 
972 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
973 					   struct vmw_sw_context *sw_context,
974 					   SVGA3dCmdHeader *header)
975 {
976 	struct vmw_sid_cmd {
977 		SVGA3dCmdHeader header;
978 		SVGA3dCmdSetRenderTarget body;
979 	} *cmd;
980 	struct vmw_resource_val_node *ctx_node;
981 	struct vmw_resource_val_node *res_node;
982 	int ret;
983 
984 	cmd = container_of(header, struct vmw_sid_cmd, header);
985 
986 	if (cmd->body.type >= SVGA3D_RT_MAX) {
987 		DRM_ERROR("Illegal render target type %u.\n",
988 			  (unsigned) cmd->body.type);
989 		return -EINVAL;
990 	}
991 
992 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
993 				user_context_converter, &cmd->body.cid,
994 				&ctx_node);
995 	if (unlikely(ret != 0))
996 		return ret;
997 
998 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
999 				user_surface_converter,
1000 				&cmd->body.target.sid, &res_node);
1001 	if (unlikely(ret != 0))
1002 		return ret;
1003 
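	/*
	 * On guest-backed devices, track the render target binding in the
	 * context's staged binding state.
	 */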
1004 	if (dev_priv->has_mob) {
1005 		struct vmw_ctx_bindinfo_view binding;
1006 
1007 		binding.bi.ctx = ctx_node->res;
1008 		binding.bi.res = res_node ? res_node->res : NULL;
1009 		binding.bi.bt = vmw_ctx_binding_rt;
1010 		binding.slot = cmd->body.type;
1011 		vmw_binding_add(ctx_node->staged_bindings,
1012 				&binding.bi, 0, binding.slot);
1013 	}
1014 
1015 	return 0;
1016 }
1017 
1018 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
1019 				      struct vmw_sw_context *sw_context,
1020 				      SVGA3dCmdHeader *header)
1021 {
1022 	struct vmw_sid_cmd {
1023 		SVGA3dCmdHeader header;
1024 		SVGA3dCmdSurfaceCopy body;
1025 	} *cmd;
1026 	int ret;
1027 
1028 	cmd = container_of(header, struct vmw_sid_cmd, header);
1029 
1030 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1031 				user_surface_converter,
1032 				&cmd->body.src.sid, NULL);
1033 	if (ret)
1034 		return ret;
1035 
1036 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1037 				 user_surface_converter,
1038 				 &cmd->body.dest.sid, NULL);
1039 }
1040 
1041 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
1042 				      struct vmw_sw_context *sw_context,
1043 				      SVGA3dCmdHeader *header)
1044 {
1045 	struct {
1046 		SVGA3dCmdHeader header;
1047 		SVGA3dCmdDXBufferCopy body;
1048 	} *cmd;
1049 	int ret;
1050 
1051 	cmd = container_of(header, typeof(*cmd), header);
1052 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1053 				user_surface_converter,
1054 				&cmd->body.src, NULL);
1055 	if (ret != 0)
1056 		return ret;
1057 
1058 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1059 				 user_surface_converter,
1060 				 &cmd->body.dest, NULL);
1061 }
1062 
1063 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
1064 				   struct vmw_sw_context *sw_context,
1065 				   SVGA3dCmdHeader *header)
1066 {
1067 	struct {
1068 		SVGA3dCmdHeader header;
1069 		SVGA3dCmdDXPredCopyRegion body;
1070 	} *cmd;
1071 	int ret;
1072 
1073 	cmd = container_of(header, typeof(*cmd), header);
1074 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1075 				user_surface_converter,
1076 				&cmd->body.srcSid, NULL);
1077 	if (ret != 0)
1078 		return ret;
1079 
1080 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1081 				 user_surface_converter,
1082 				 &cmd->body.dstSid, NULL);
1083 }
1084 
1085 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
1086 				     struct vmw_sw_context *sw_context,
1087 				     SVGA3dCmdHeader *header)
1088 {
1089 	struct vmw_sid_cmd {
1090 		SVGA3dCmdHeader header;
1091 		SVGA3dCmdSurfaceStretchBlt body;
1092 	} *cmd;
1093 	int ret;
1094 
1095 	cmd = container_of(header, struct vmw_sid_cmd, header);
1096 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1097 				user_surface_converter,
1098 				&cmd->body.src.sid, NULL);
1099 	if (unlikely(ret != 0))
1100 		return ret;
1101 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1102 				 user_surface_converter,
1103 				 &cmd->body.dest.sid, NULL);
1104 }
1105 
1106 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1107 					 struct vmw_sw_context *sw_context,
1108 					 SVGA3dCmdHeader *header)
1109 {
1110 	struct vmw_sid_cmd {
1111 		SVGA3dCmdHeader header;
1112 		SVGA3dCmdBlitSurfaceToScreen body;
1113 	} *cmd;
1114 
1115 	cmd = container_of(header, struct vmw_sid_cmd, header);
1116 
1117 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1118 				 user_surface_converter,
1119 				 &cmd->body.srcImage.sid, NULL);
1120 }
1121 
1122 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1123 				 struct vmw_sw_context *sw_context,
1124 				 SVGA3dCmdHeader *header)
1125 {
1126 	struct vmw_sid_cmd {
1127 		SVGA3dCmdHeader header;
1128 		SVGA3dCmdPresent body;
1129 	} *cmd;
1130 
1131 
1132 	cmd = container_of(header, struct vmw_sid_cmd, header);
1133 
1134 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1135 				 user_surface_converter, &cmd->body.sid,
1136 				 NULL);
1137 }
1138 
1139 /**
1140  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1141  *
1142  * @dev_priv: The device private structure.
1143  * @new_query_bo: The new buffer holding query results.
1144  * @sw_context: The software context used for this command submission.
1145  *
1146  * This function checks whether @new_query_bo is suitable for holding
1147  * query results, and if another buffer currently is pinned for query
1148  * results. If so, the function prepares the state of @sw_context for
1149  * switching pinned buffers after successful submission of the current
1150  * command batch.
1151  */
1152 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1153 				       struct vmw_dma_buffer *new_query_bo,
1154 				       struct vmw_sw_context *sw_context)
1155 {
1156 	struct vmw_res_cache_entry *ctx_entry =
1157 		&sw_context->res_cache[vmw_res_context];
1158 	int ret;
1159 
1160 	BUG_ON(!ctx_entry->valid);
1161 	sw_context->last_query_ctx = ctx_entry->res;
1162 
1163 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1164 
1165 		if (unlikely(new_query_bo->base.num_pages > 4)) {
1166 			DRM_ERROR("Query buffer too large.\n");
1167 			return -EINVAL;
1168 		}
1169 
1170 		if (unlikely(sw_context->cur_query_bo != NULL)) {
1171 			sw_context->needs_post_query_barrier = true;
1172 			ret = vmw_bo_to_validate_list(sw_context,
1173 						      sw_context->cur_query_bo,
1174 						      dev_priv->has_mob, NULL);
1175 			if (unlikely(ret != 0))
1176 				return ret;
1177 		}
1178 		sw_context->cur_query_bo = new_query_bo;
1179 
1180 		ret = vmw_bo_to_validate_list(sw_context,
1181 					      dev_priv->dummy_query_bo,
1182 					      dev_priv->has_mob, NULL);
1183 		if (unlikely(ret != 0))
1184 			return ret;
1185 
1186 	}
1187 
1188 	return 0;
1189 }
1190 
1191 
1192 /**
1193  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1194  *
1195  * @dev_priv: The device private structure.
1196  * @sw_context: The software context used for this command submission batch.
1197  *
1198  * This function will check if we're switching query buffers, and will then,
1199  * issue a dummy occlusion query wait used as a query barrier. When the fence
1200  * object following that query wait has signaled, we are sure that all
1201  * preceding queries have finished, and the old query buffer can be unpinned.
1202  * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
1204  * old query buffer won't be moved until the fence has signaled.
1205  *
 * As mentioned above, both the new and the old query buffers need to be fenced
1207  * using a sequence emitted *after* calling this function.
1208  */
1209 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1210 				     struct vmw_sw_context *sw_context)
1211 {
1212 	/*
1213 	 * The validate list should still hold references to all
1214 	 * contexts here.
1215 	 */
1216 
1217 	if (sw_context->needs_post_query_barrier) {
1218 		struct vmw_res_cache_entry *ctx_entry =
1219 			&sw_context->res_cache[vmw_res_context];
1220 		struct vmw_resource *ctx;
1221 		int ret;
1222 
1223 		BUG_ON(!ctx_entry->valid);
1224 		ctx = ctx_entry->res;
1225 
1226 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1227 
1228 		if (unlikely(ret != 0))
1229 			DRM_ERROR("Out of fifo space for dummy query.\n");
1230 	}
1231 
1232 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1233 		if (dev_priv->pinned_bo) {
1234 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1235 			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
1236 		}
1237 
1238 		if (!sw_context->needs_post_query_barrier) {
1239 			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1240 
1241 			/*
1242 			 * We pin also the dummy_query_bo buffer so that we
1243 			 * don't need to validate it when emitting
1244 			 * dummy queries in context destroy paths.
1245 			 */
1246 
1247 			if (!dev_priv->dummy_query_bo_pinned) {
1248 				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1249 						    true);
1250 				dev_priv->dummy_query_bo_pinned = true;
1251 			}
1252 
1253 			BUG_ON(sw_context->last_query_ctx == NULL);
1254 			dev_priv->query_cid = sw_context->last_query_ctx->id;
1255 			dev_priv->query_cid_valid = true;
1256 			dev_priv->pinned_bo =
1257 				vmw_dmabuf_reference(sw_context->cur_query_bo);
1258 		}
1259 	}
1260 }
1261 
1262 /**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
1264  * handle to a MOB id.
1265  *
1266  * @dev_priv: Pointer to a device private structure.
1267  * @sw_context: The software context used for this command batch validation.
1268  * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
1270  * a reference-counted pointer to the DMA buffer identified by the
1271  * user-space handle in @id.
1272  *
1273  * This function saves information needed to translate a user-space buffer
1274  * handle to a MOB id. The translation does not take place immediately, but
1275  * during a call to vmw_apply_relocations(). This function builds a relocation
1276  * list and a list of buffers to validate. The former needs to be freed using
1277  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
1278  * needs to be freed using vmw_clear_validations.
1279  */
1280 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1281 				 struct vmw_sw_context *sw_context,
1282 				 SVGAMobId *id,
1283 				 struct vmw_dma_buffer **vmw_bo_p)
1284 {
1285 	struct vmw_dma_buffer *vmw_bo = NULL;
1286 	uint32_t handle = *id;
1287 	struct vmw_relocation *reloc;
1288 	int ret;
1289 
1290 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1291 				     NULL);
1292 	if (unlikely(ret != 0)) {
1293 		DRM_ERROR("Could not find or use MOB buffer.\n");
1294 		ret = -EINVAL;
1295 		goto out_no_reloc;
1296 	}
1297 
1298 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
1301 		ret = -EINVAL;
1302 		goto out_no_reloc;
1303 	}
1304 
1305 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
1306 	reloc->mob_loc = id;
1307 	reloc->location = NULL;
1308 
1309 	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
1310 	if (unlikely(ret != 0))
1311 		goto out_no_reloc;
1312 
1313 	*vmw_bo_p = vmw_bo;
1314 	return 0;
1315 
1316 out_no_reloc:
1317 	vmw_dmabuf_unreference(&vmw_bo);
1318 	*vmw_bo_p = NULL;
1319 	return ret;
1320 }
1321 
1322 /**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
1324  * handle to a valid SVGAGuestPtr
1325  *
1326  * @dev_priv: Pointer to a device private structure.
1327  * @sw_context: The software context used for this command batch validation.
1328  * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
1332  *
1333  * This function saves information needed to translate a user-space buffer
1334  * handle to a valid SVGAGuestPtr. The translation does not take place
1335  * immediately, but during a call to vmw_apply_relocations().
1336  * This function builds a relocation list and a list of buffers to validate.
1337  * The former needs to be freed using either vmw_apply_relocations() or
1338  * vmw_free_relocations(). The latter needs to be freed using
1339  * vmw_clear_validations.
1340  */
1341 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1342 				   struct vmw_sw_context *sw_context,
1343 				   SVGAGuestPtr *ptr,
1344 				   struct vmw_dma_buffer **vmw_bo_p)
1345 {
1346 	struct vmw_dma_buffer *vmw_bo = NULL;
1347 	uint32_t handle = ptr->gmrId;
1348 	struct vmw_relocation *reloc;
1349 	int ret;
1350 
1351 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1352 				     NULL);
1353 	if (unlikely(ret != 0)) {
1354 		DRM_ERROR("Could not find or use GMR region.\n");
1355 		ret = -EINVAL;
1356 		goto out_no_reloc;
1357 	}
1358 
1359 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
1362 		ret = -EINVAL;
1363 		goto out_no_reloc;
1364 	}
1365 
1366 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
1367 	reloc->location = ptr;
1368 
1369 	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
1370 	if (unlikely(ret != 0))
1371 		goto out_no_reloc;
1372 
1373 	*vmw_bo_p = vmw_bo;
1374 	return 0;
1375 
1376 out_no_reloc:
1377 	vmw_dmabuf_unreference(&vmw_bo);
1378 	*vmw_bo_p = NULL;
1379 	return ret;
1380 }
1381 
1382 
1383 
1384 /**
1385  * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1386  *
1387  * @dev_priv: Pointer to a device private struct.
1388  * @sw_context: The software context used for this command submission.
1389  * @header: Pointer to the command header in the command stream.
1390  *
 * This function adds the new query into the query COTABLE.
1392  */
1393 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1394 				   struct vmw_sw_context *sw_context,
1395 				   SVGA3dCmdHeader *header)
1396 {
1397 	struct vmw_dx_define_query_cmd {
1398 		SVGA3dCmdHeader header;
1399 		SVGA3dCmdDXDefineQuery q;
1400 	} *cmd;
1401 
1402 	int    ret;
1403 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1404 	struct vmw_resource *cotable_res;
1405 
1406 
1407 	if (ctx_node == NULL) {
1408 		DRM_ERROR("DX Context not set for query.\n");
1409 		return -EINVAL;
1410 	}
1411 
1412 	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1413 
1414 	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
1415 	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1416 		return -EINVAL;
1417 
1418 	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1419 	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1420 	vmw_resource_unreference(&cotable_res);
1421 
1422 	return ret;
1423 }
1424 
1425 
1426 
1427 /**
1428  * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1429  *
1430  * @dev_priv: Pointer to a device private struct.
1431  * @sw_context: The software context used for this command submission.
1432  * @header: Pointer to the command header in the command stream.
1433  *
1434  * The query bind operation will eventually associate the query ID
1435  * with its backing MOB.  In this function, we take the user mode
1436  * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1437  * kernel mode equivalent.
1438  */
1439 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1440 				 struct vmw_sw_context *sw_context,
1441 				 SVGA3dCmdHeader *header)
1442 {
1443 	struct vmw_dx_bind_query_cmd {
1444 		SVGA3dCmdHeader header;
1445 		SVGA3dCmdDXBindQuery q;
1446 	} *cmd;
1447 
1448 	struct vmw_dma_buffer *vmw_bo;
1449 	int    ret;
1450 
1451 
1452 	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1453 
1454 	/*
1455 	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1456 	 * list so its kernel mode MOB ID can be filled in later
1457 	 */
1458 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1459 				    &vmw_bo);
1460 
1461 	if (ret != 0)
1462 		return ret;
1463 
1464 	sw_context->dx_query_mob = vmw_bo;
1465 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1466 
1467 	vmw_dmabuf_unreference(&vmw_bo);
1468 
1469 	return ret;
1470 }
1471 
1472 
1473 
1474 /**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
1476  *
1477  * @dev_priv: Pointer to a device private struct.
1478  * @sw_context: The software context used for this command submission.
1479  * @header: Pointer to the command header in the command stream.
1480  */
1481 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1482 				  struct vmw_sw_context *sw_context,
1483 				  SVGA3dCmdHeader *header)
1484 {
1485 	struct vmw_begin_gb_query_cmd {
1486 		SVGA3dCmdHeader header;
1487 		SVGA3dCmdBeginGBQuery q;
1488 	} *cmd;
1489 
1490 	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1491 			   header);
1492 
1493 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1494 				 user_context_converter, &cmd->q.cid,
1495 				 NULL);
1496 }
1497 
1498 /**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
1500  *
1501  * @dev_priv: Pointer to a device private struct.
1502  * @sw_context: The software context used for this command submission.
1503  * @header: Pointer to the command header in the command stream.
1504  */
1505 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1506 			       struct vmw_sw_context *sw_context,
1507 			       SVGA3dCmdHeader *header)
1508 {
1509 	struct vmw_begin_query_cmd {
1510 		SVGA3dCmdHeader header;
1511 		SVGA3dCmdBeginQuery q;
1512 	} *cmd;
1513 
1514 	cmd = container_of(header, struct vmw_begin_query_cmd,
1515 			   header);
1516 
1517 	if (unlikely(dev_priv->has_mob)) {
1518 		struct {
1519 			SVGA3dCmdHeader header;
1520 			SVGA3dCmdBeginGBQuery q;
1521 		} gb_cmd;
1522 
1523 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1524 
1525 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1526 		gb_cmd.header.size = cmd->header.size;
1527 		gb_cmd.q.cid = cmd->q.cid;
1528 		gb_cmd.q.type = cmd->q.type;
1529 
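		/*
		 * Rewrite the legacy command in place as its guest-backed
		 * equivalent and validate it as such.
		 */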
1530 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1531 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1532 	}
1533 
1534 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1535 				 user_context_converter, &cmd->q.cid,
1536 				 NULL);
1537 }
1538 
1539 /**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1541  *
1542  * @dev_priv: Pointer to a device private struct.
1543  * @sw_context: The software context used for this command submission.
1544  * @header: Pointer to the command header in the command stream.
1545  */
1546 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1547 				struct vmw_sw_context *sw_context,
1548 				SVGA3dCmdHeader *header)
1549 {
1550 	struct vmw_dma_buffer *vmw_bo;
1551 	struct vmw_query_cmd {
1552 		SVGA3dCmdHeader header;
1553 		SVGA3dCmdEndGBQuery q;
1554 	} *cmd;
1555 	int ret;
1556 
1557 	cmd = container_of(header, struct vmw_query_cmd, header);
1558 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1559 	if (unlikely(ret != 0))
1560 		return ret;
1561 
1562 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1563 				    &cmd->q.mobid,
1564 				    &vmw_bo);
1565 	if (unlikely(ret != 0))
1566 		return ret;
1567 
1568 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1569 
1570 	vmw_dmabuf_unreference(&vmw_bo);
1571 	return ret;
1572 }
1573 
1574 /**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1576  *
1577  * @dev_priv: Pointer to a device private struct.
1578  * @sw_context: The software context used for this command submission.
1579  * @header: Pointer to the command header in the command stream.
1580  */
1581 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1582 			     struct vmw_sw_context *sw_context,
1583 			     SVGA3dCmdHeader *header)
1584 {
1585 	struct vmw_dma_buffer *vmw_bo;
1586 	struct vmw_query_cmd {
1587 		SVGA3dCmdHeader header;
1588 		SVGA3dCmdEndQuery q;
1589 	} *cmd;
1590 	int ret;
1591 
1592 	cmd = container_of(header, struct vmw_query_cmd, header);
1593 	if (dev_priv->has_mob) {
1594 		struct {
1595 			SVGA3dCmdHeader header;
1596 			SVGA3dCmdEndGBQuery q;
1597 		} gb_cmd;
1598 
1599 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1600 
1601 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1602 		gb_cmd.header.size = cmd->header.size;
1603 		gb_cmd.q.cid = cmd->q.cid;
1604 		gb_cmd.q.type = cmd->q.type;
1605 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1606 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1607 
1608 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1609 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1610 	}
1611 
1612 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1613 	if (unlikely(ret != 0))
1614 		return ret;
1615 
1616 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1617 				      &cmd->q.guestResult,
1618 				      &vmw_bo);
1619 	if (unlikely(ret != 0))
1620 		return ret;
1621 
1622 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1623 
1624 	vmw_dmabuf_unreference(&vmw_bo);
1625 	return ret;
1626 }
1627 
1628 /**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1630  *
1631  * @dev_priv: Pointer to a device private struct.
1632  * @sw_context: The software context used for this command submission.
1633  * @header: Pointer to the command header in the command stream.
1634  */
1635 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1636 				 struct vmw_sw_context *sw_context,
1637 				 SVGA3dCmdHeader *header)
1638 {
1639 	struct vmw_dma_buffer *vmw_bo;
1640 	struct vmw_query_cmd {
1641 		SVGA3dCmdHeader header;
1642 		SVGA3dCmdWaitForGBQuery q;
1643 	} *cmd;
1644 	int ret;
1645 
1646 	cmd = container_of(header, struct vmw_query_cmd, header);
1647 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1648 	if (unlikely(ret != 0))
1649 		return ret;
1650 
1651 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1652 				    &cmd->q.mobid,
1653 				    &vmw_bo);
1654 	if (unlikely(ret != 0))
1655 		return ret;
1656 
1657 	vmw_dmabuf_unreference(&vmw_bo);
1658 	return 0;
1659 }
1660 
1661 /**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1663  *
1664  * @dev_priv: Pointer to a device private struct.
1665  * @sw_context: The software context used for this command submission.
1666  * @header: Pointer to the command header in the command stream.
1667  */
1668 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1669 			      struct vmw_sw_context *sw_context,
1670 			      SVGA3dCmdHeader *header)
1671 {
1672 	struct vmw_dma_buffer *vmw_bo;
1673 	struct vmw_query_cmd {
1674 		SVGA3dCmdHeader header;
1675 		SVGA3dCmdWaitForQuery q;
1676 	} *cmd;
1677 	int ret;
1678 
1679 	cmd = container_of(header, struct vmw_query_cmd, header);
1680 	if (dev_priv->has_mob) {
1681 		struct {
1682 			SVGA3dCmdHeader header;
1683 			SVGA3dCmdWaitForGBQuery q;
1684 		} gb_cmd;
1685 
1686 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1687 
1688 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1689 		gb_cmd.header.size = cmd->header.size;
1690 		gb_cmd.q.cid = cmd->q.cid;
1691 		gb_cmd.q.type = cmd->q.type;
1692 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1693 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1694 
1695 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1696 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1697 	}
1698 
1699 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1700 	if (unlikely(ret != 0))
1701 		return ret;
1702 
1703 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1704 				      &cmd->q.guestResult,
1705 				      &vmw_bo);
1706 	if (unlikely(ret != 0))
1707 		return ret;
1708 
1709 	vmw_dmabuf_unreference(&vmw_bo);
1710 	return 0;
1711 }
1712 
1713 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1714 		       struct vmw_sw_context *sw_context,
1715 		       SVGA3dCmdHeader *header)
1716 {
1717 	struct vmw_dma_buffer *vmw_bo = NULL;
1718 	struct vmw_surface *srf = NULL;
1719 	struct vmw_dma_cmd {
1720 		SVGA3dCmdHeader header;
1721 		SVGA3dCmdSurfaceDMA dma;
1722 	} *cmd;
1723 	int ret;
1724 	SVGA3dCmdSurfaceDMASuffix *suffix;
1725 	uint32_t bo_size;
1726 
1727 	cmd = container_of(header, struct vmw_dma_cmd, header);
1728 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1729 					       header->size - sizeof(*suffix));
1730 
	/* Make sure the device and verifier stay in sync. */
1732 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1733 		DRM_ERROR("Invalid DMA suffix size.\n");
1734 		return -EINVAL;
1735 	}
1736 
1737 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1738 				      &cmd->dma.guest.ptr,
1739 				      &vmw_bo);
1740 	if (unlikely(ret != 0))
1741 		return ret;
1742 
1743 	/* Make sure DMA doesn't cross BO boundaries. */
1744 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1745 	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1746 		DRM_ERROR("Invalid DMA offset.\n");
1747 		return -EINVAL;
1748 	}
1749 
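	/* Clamp the transfer so it cannot extend beyond the end of the BO. */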
1750 	bo_size -= cmd->dma.guest.ptr.offset;
1751 	if (unlikely(suffix->maximumOffset > bo_size))
1752 		suffix->maximumOffset = bo_size;
1753 
1754 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1755 				user_surface_converter, &cmd->dma.host.sid,
1756 				NULL);
1757 	if (unlikely(ret != 0)) {
1758 		if (unlikely(ret != -ERESTARTSYS))
1759 			DRM_ERROR("could not find surface for DMA.\n");
1760 		goto out_no_surface;
1761 	}
1762 
1763 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1764 
1765 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1766 			     header);
1767 
1768 out_no_surface:
1769 	vmw_dmabuf_unreference(&vmw_bo);
1770 	return ret;
1771 }
1772 
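/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */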
1773 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1774 			struct vmw_sw_context *sw_context,
1775 			SVGA3dCmdHeader *header)
1776 {
1777 	struct vmw_draw_cmd {
1778 		SVGA3dCmdHeader header;
1779 		SVGA3dCmdDrawPrimitives body;
1780 	} *cmd;
1781 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1782 		(unsigned long)header + sizeof(*cmd));
1783 	SVGA3dPrimitiveRange *range;
1784 	uint32_t i;
1785 	uint32_t maxnum;
1786 	int ret;
1787 
1788 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1789 	if (unlikely(ret != 0))
1790 		return ret;
1791 
1792 	cmd = container_of(header, struct vmw_draw_cmd, header);
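	/*
	 * Make sure the declared number of vertex declarations actually
	 * fits within the command body.
	 */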
1793 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1794 
1795 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1796 		DRM_ERROR("Illegal number of vertex declarations.\n");
1797 		return -EINVAL;
1798 	}
1799 
1800 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1801 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1802 					user_surface_converter,
1803 					&decl->array.surfaceId, NULL);
1804 		if (unlikely(ret != 0))
1805 			return ret;
1806 	}
1807 
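	/*
	 * The primitive ranges follow the vertex declarations; make sure
	 * the declared number of ranges fits in the remaining command body.
	 */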
1808 	maxnum = (header->size - sizeof(cmd->body) -
1809 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1810 	if (unlikely(cmd->body.numRanges > maxnum)) {
1811 		DRM_ERROR("Illegal number of index ranges.\n");
1812 		return -EINVAL;
1813 	}
1814 
1815 	range = (SVGA3dPrimitiveRange *) decl;
1816 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1817 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818 					user_surface_converter,
1819 					&range->indexArray.surfaceId, NULL);
1820 		if (unlikely(ret != 0))
1821 			return ret;
1822 	}
1823 	return 0;
1824 }
1825 
1826 
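/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */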
1827 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1828 			     struct vmw_sw_context *sw_context,
1829 			     SVGA3dCmdHeader *header)
1830 {
1831 	struct vmw_tex_state_cmd {
1832 		SVGA3dCmdHeader header;
1833 		SVGA3dCmdSetTextureState state;
1834 	} *cmd;
1835 
1836 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1837 	  ((unsigned long) header + header->size + sizeof(header));
1838 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1839 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1840 	struct vmw_resource_val_node *ctx_node;
1841 	struct vmw_resource_val_node *res_node;
1842 	int ret;
1843 
1844 	cmd = container_of(header, struct vmw_tex_state_cmd,
1845 			   header);
1846 
1847 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1848 				user_context_converter, &cmd->state.cid,
1849 				&ctx_node);
1850 	if (unlikely(ret != 0))
1851 		return ret;
1852 
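	/*
	 * Walk all texture states in the command. Only texture binds need
	 * their surface validated and, on guest-backed devices, tracked in
	 * the context's staged bindings.
	 */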
1853 	for (; cur_state < last_state; ++cur_state) {
1854 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1855 			continue;
1856 
1857 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1858 			DRM_ERROR("Illegal texture/sampler unit %u.\n",
1859 				  (unsigned) cur_state->stage);
1860 			return -EINVAL;
1861 		}
1862 
1863 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1864 					user_surface_converter,
1865 					&cur_state->value, &res_node);
1866 		if (unlikely(ret != 0))
1867 			return ret;
1868 
1869 		if (dev_priv->has_mob) {
1870 			struct vmw_ctx_bindinfo_tex binding;
1871 
1872 			binding.bi.ctx = ctx_node->res;
1873 			binding.bi.res = res_node ? res_node->res : NULL;
1874 			binding.bi.bt = vmw_ctx_binding_tex;
1875 			binding.texture_stage = cur_state->stage;
1876 			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1877 					0, binding.texture_stage);
1878 		}
1879 	}
1880 
1881 	return 0;
1882 }
1883 
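/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 */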
1884 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1885 				      struct vmw_sw_context *sw_context,
1886 				      void *buf)
1887 {
1888 	struct vmw_dma_buffer *vmw_bo;
1889 	int ret;
1890 
1891 	struct {
1892 		uint32_t header;
1893 		SVGAFifoCmdDefineGMRFB body;
1894 	} *cmd = buf;
1895 
1896 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1897 				      &cmd->body.ptr,
1898 				      &vmw_bo);
1899 	if (unlikely(ret != 0))
1900 		return ret;
1901 
1902 	vmw_dmabuf_unreference(&vmw_bo);
1903 
1904 	return ret;
1905 }
1906 
1907 
1908 /**
1909  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1910  * switching
1911  *
1912  * @dev_priv: Pointer to a device private struct.
1913  * @sw_context: The software context being used for this batch.
1914  * @val_node: The validation node representing the resource.
1915  * @buf_id: Pointer to the user-space backup buffer handle in the command
1916  * stream.
1917  * @backup_offset: Offset of backup into MOB.
1918  *
1919  * This function prepares for registering a switch of backup buffers
1920  * in the resource metadata just prior to unreserving.
1922  */
1923 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1924 				     struct vmw_sw_context *sw_context,
1925 				     struct vmw_resource_val_node *val_node,
1926 				     uint32_t *buf_id,
1927 				     unsigned long backup_offset)
1928 {
1929 	struct vmw_dma_buffer *dma_buf;
1930 	int ret;
1931 
1932 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1933 	if (ret)
1934 		return ret;
1935 
1936 	val_node->switching_backup = true;
1937 	if (val_node->first_usage)
1938 		val_node->no_buffer_needed = true;
1939 
1940 	vmw_dmabuf_unreference(&val_node->new_backup);
1941 	val_node->new_backup = dma_buf;
1942 	val_node->new_backup_offset = backup_offset;
1943 
1944 	return 0;
1945 }
1946 
1947 
1948 /**
1949  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1950  *
1951  * @dev_priv: Pointer to a device private struct.
1952  * @sw_context: The software context being used for this batch.
1953  * @res_type: The resource type.
1954  * @converter: Information about user-space binding for this resource type.
1955  * @res_id: Pointer to the user-space resource handle in the command stream.
1956  * @buf_id: Pointer to the user-space backup buffer handle in the command
1957  * stream.
1958  * @backup_offset: Offset of backup into MOB.
1959  *
1960  * This function prepares for registering a switch of backup buffers
1961  * in the resource metadata just prior to unreserving. It's basically a wrapper
1962  * around vmw_cmd_res_switch_backup with a different interface.
1963  */
1964 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1965 				 struct vmw_sw_context *sw_context,
1966 				 enum vmw_res_type res_type,
1967 				 const struct vmw_user_resource_conv
1968 				 *converter,
1969 				 uint32_t *res_id,
1970 				 uint32_t *buf_id,
1971 				 unsigned long backup_offset)
1972 {
1973 	struct vmw_resource_val_node *val_node;
1974 	int ret;
1975 
1976 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1977 				converter, res_id, &val_node);
1978 	if (ret)
1979 		return ret;
1980 
1981 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1982 					 buf_id, backup_offset);
1983 }
1984 
1985 /**
1986  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1987  * command
1988  *
1989  * @dev_priv: Pointer to a device private struct.
1990  * @sw_context: The software context being used for this batch.
1991  * @header: Pointer to the command header in the command stream.
1992  */
1993 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1994 				   struct vmw_sw_context *sw_context,
1995 				   SVGA3dCmdHeader *header)
1996 {
1997 	struct vmw_bind_gb_surface_cmd {
1998 		SVGA3dCmdHeader header;
1999 		SVGA3dCmdBindGBSurface body;
2000 	} *cmd;
2001 
2002 	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
2003 
2004 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
2005 				     user_surface_converter,
2006 				     &cmd->body.sid, &cmd->body.mobid,
2007 				     0);
2008 }
2009 
2010 /**
2011  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
2012  * command
2013  *
2014  * @dev_priv: Pointer to a device private struct.
2015  * @sw_context: The software context being used for this batch.
2016  * @header: Pointer to the command header in the command stream.
2017  */
2018 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
2019 				   struct vmw_sw_context *sw_context,
2020 				   SVGA3dCmdHeader *header)
2021 {
2022 	struct vmw_gb_surface_cmd {
2023 		SVGA3dCmdHeader header;
2024 		SVGA3dCmdUpdateGBImage body;
2025 	} *cmd;
2026 
2027 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2028 
2029 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2030 				 user_surface_converter,
2031 				 &cmd->body.image.sid, NULL);
2032 }
2033 
2034 /**
2035  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
2036  * command
2037  *
2038  * @dev_priv: Pointer to a device private struct.
2039  * @sw_context: The software context being used for this batch.
2040  * @header: Pointer to the command header in the command stream.
2041  */
2042 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
2043 				     struct vmw_sw_context *sw_context,
2044 				     SVGA3dCmdHeader *header)
2045 {
2046 	struct vmw_gb_surface_cmd {
2047 		SVGA3dCmdHeader header;
2048 		SVGA3dCmdUpdateGBSurface body;
2049 	} *cmd;
2050 
2051 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2052 
2053 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2054 				 user_surface_converter,
2055 				 &cmd->body.sid, NULL);
2056 }
2057 
2058 /**
2059  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2060  * command
2061  *
2062  * @dev_priv: Pointer to a device private struct.
2063  * @sw_context: The software context being used for this batch.
2064  * @header: Pointer to the command header in the command stream.
2065  */
2066 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2067 				     struct vmw_sw_context *sw_context,
2068 				     SVGA3dCmdHeader *header)
2069 {
2070 	struct vmw_gb_surface_cmd {
2071 		SVGA3dCmdHeader header;
2072 		SVGA3dCmdReadbackGBImage body;
2073 	} *cmd;
2074 
2075 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2076 
2077 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2078 				 user_surface_converter,
2079 				 &cmd->body.image.sid, NULL);
2080 }
2081 
2082 /**
2083  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2084  * command
2085  *
2086  * @dev_priv: Pointer to a device private struct.
2087  * @sw_context: The software context being used for this batch.
2088  * @header: Pointer to the command header in the command stream.
2089  */
2090 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2091 				       struct vmw_sw_context *sw_context,
2092 				       SVGA3dCmdHeader *header)
2093 {
2094 	struct vmw_gb_surface_cmd {
2095 		SVGA3dCmdHeader header;
2096 		SVGA3dCmdReadbackGBSurface body;
2097 	} *cmd;
2098 
2099 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2100 
2101 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2102 				 user_surface_converter,
2103 				 &cmd->body.sid, NULL);
2104 }
2105 
2106 /**
2107  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2108  * command
2109  *
2110  * @dev_priv: Pointer to a device private struct.
2111  * @sw_context: The software context being used for this batch.
2112  * @header: Pointer to the command header in the command stream.
2113  */
2114 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2115 				       struct vmw_sw_context *sw_context,
2116 				       SVGA3dCmdHeader *header)
2117 {
2118 	struct vmw_gb_surface_cmd {
2119 		SVGA3dCmdHeader header;
2120 		SVGA3dCmdInvalidateGBImage body;
2121 	} *cmd;
2122 
2123 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2124 
2125 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2126 				 user_surface_converter,
2127 				 &cmd->body.image.sid, NULL);
2128 }
2129 
2130 /**
2131  * vmw_cmd_invalidate_gb_surface - Validate an
2132  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2133  *
2134  * @dev_priv: Pointer to a device private struct.
2135  * @sw_context: The software context being used for this batch.
2136  * @header: Pointer to the command header in the command stream.
2137  */
2138 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2139 					 struct vmw_sw_context *sw_context,
2140 					 SVGA3dCmdHeader *header)
2141 {
2142 	struct vmw_gb_surface_cmd {
2143 		SVGA3dCmdHeader header;
2144 		SVGA3dCmdInvalidateGBSurface body;
2145 	} *cmd;
2146 
2147 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2148 
2149 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2150 				 user_surface_converter,
2151 				 &cmd->body.sid, NULL);
2152 }
2153 
2154 
2155 /**
2156  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2157  * command
2158  *
2159  * @dev_priv: Pointer to a device private struct.
2160  * @sw_context: The software context being used for this batch.
2161  * @header: Pointer to the command header in the command stream.
2162  */
2163 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2164 				 struct vmw_sw_context *sw_context,
2165 				 SVGA3dCmdHeader *header)
2166 {
2167 	struct vmw_shader_define_cmd {
2168 		SVGA3dCmdHeader header;
2169 		SVGA3dCmdDefineShader body;
2170 	} *cmd;
2171 	int ret;
2172 	size_t size;
2173 	struct vmw_resource_val_node *val;
2174 
2175 	cmd = container_of(header, struct vmw_shader_define_cmd,
2176 			   header);
2177 
2178 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2179 				user_context_converter, &cmd->body.cid,
2180 				&val);
2181 	if (unlikely(ret != 0))
2182 		return ret;
2183 
2184 	if (unlikely(!dev_priv->has_mob))
2185 		return 0;
2186 
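	/*
	 * On guest-backed devices the inline shader bytecode following the
	 * command body is added as a kernel-managed compat shader resource.
	 */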
2187 	size = cmd->header.size - sizeof(cmd->body);
2188 	ret = vmw_compat_shader_add(dev_priv,
2189 				    vmw_context_res_man(val->res),
2190 				    cmd->body.shid, cmd + 1,
2191 				    cmd->body.type, size,
2192 				    &sw_context->staged_cmd_res);
2193 	if (unlikely(ret != 0))
2194 		return ret;
2195 
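	/*
	 * The shader is now managed by the kernel, so replace the define
	 * command itself with a NOP in the device command stream.
	 */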
2196 	return vmw_resource_relocation_add(&sw_context->res_relocations,
2197 					   NULL,
2198 					   vmw_ptr_diff(sw_context->buf_start,
2199 							&cmd->header.id),
2200 					   vmw_res_rel_nop);
2201 }
2202 
2203 /**
2204  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2205  * command
2206  *
2207  * @dev_priv: Pointer to a device private struct.
2208  * @sw_context: The software context being used for this batch.
2209  * @header: Pointer to the command header in the command stream.
2210  */
2211 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2212 				  struct vmw_sw_context *sw_context,
2213 				  SVGA3dCmdHeader *header)
2214 {
2215 	struct vmw_shader_destroy_cmd {
2216 		SVGA3dCmdHeader header;
2217 		SVGA3dCmdDestroyShader body;
2218 	} *cmd;
2219 	int ret;
2220 	struct vmw_resource_val_node *val;
2221 
2222 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
2223 			   header);
2224 
2225 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2226 				user_context_converter, &cmd->body.cid,
2227 				&val);
2228 	if (unlikely(ret != 0))
2229 		return ret;
2230 
2231 	if (unlikely(!dev_priv->has_mob))
2232 		return 0;
2233 
2234 	ret = vmw_shader_remove(vmw_context_res_man(val->res),
2235 				cmd->body.shid,
2236 				cmd->body.type,
2237 				&sw_context->staged_cmd_res);
2238 	if (unlikely(ret != 0))
2239 		return ret;
2240 
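	/* The shader was removed on the kernel side; NOP out the command. */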
2241 	return vmw_resource_relocation_add(&sw_context->res_relocations,
2242 					   NULL,
2243 					   vmw_ptr_diff(sw_context->buf_start,
2244 							&cmd->header.id),
2245 					   vmw_res_rel_nop);
2246 }
2247 
2248 /**
2249  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2250  * command
2251  *
2252  * @dev_priv: Pointer to a device private struct.
2253  * @sw_context: The software context being used for this batch.
2254  * @header: Pointer to the command header in the command stream.
2255  */
2256 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2257 			      struct vmw_sw_context *sw_context,
2258 			      SVGA3dCmdHeader *header)
2259 {
2260 	struct vmw_set_shader_cmd {
2261 		SVGA3dCmdHeader header;
2262 		SVGA3dCmdSetShader body;
2263 	} *cmd;
2264 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2265 	struct vmw_ctx_bindinfo_shader binding;
2266 	struct vmw_resource *res = NULL;
2267 	int ret;
2268 
2269 	cmd = container_of(header, struct vmw_set_shader_cmd,
2270 			   header);
2271 
2272 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2273 		DRM_ERROR("Illegal shader type %u.\n",
2274 			  (unsigned) cmd->body.type);
2275 		return -EINVAL;
2276 	}
2277 
2278 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2279 				user_context_converter, &cmd->body.cid,
2280 				&ctx_node);
2281 	if (unlikely(ret != 0))
2282 		return ret;
2283 
2284 	if (!dev_priv->has_mob)
2285 		return 0;
2286 
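	/*
	 * Prefer a kernel-managed compat shader; fall back to a user-space
	 * shader resource if none is found.
	 */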
2287 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2288 		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2289 					cmd->body.shid,
2290 					cmd->body.type);
2291 
2292 		if (!IS_ERR(res)) {
2293 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2294 						    &cmd->body.shid, res,
2295 						    &res_node);
2296 			vmw_resource_unreference(&res);
2297 			if (unlikely(ret != 0))
2298 				return ret;
2299 		}
2300 	}
2301 
2302 	if (!res_node) {
2303 		ret = vmw_cmd_res_check(dev_priv, sw_context,
2304 					vmw_res_shader,
2305 					user_shader_converter,
2306 					&cmd->body.shid, &res_node);
2307 		if (unlikely(ret != 0))
2308 			return ret;
2309 	}
2310 
2311 	binding.bi.ctx = ctx_node->res;
2312 	binding.bi.res = res_node ? res_node->res : NULL;
2313 	binding.bi.bt = vmw_ctx_binding_shader;
2314 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2315 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2316 			binding.shader_slot, 0);
2317 	return 0;
2318 }
2319 
2320 /**
2321  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2322  * command
2323  *
2324  * @dev_priv: Pointer to a device private struct.
2325  * @sw_context: The software context being used for this batch.
2326  * @header: Pointer to the command header in the command stream.
2327  */
2328 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2329 				    struct vmw_sw_context *sw_context,
2330 				    SVGA3dCmdHeader *header)
2331 {
2332 	struct vmw_set_shader_const_cmd {
2333 		SVGA3dCmdHeader header;
2334 		SVGA3dCmdSetShaderConst body;
2335 	} *cmd;
2336 	int ret;
2337 
2338 	cmd = container_of(header, struct vmw_set_shader_const_cmd,
2339 			   header);
2340 
2341 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2342 				user_context_converter, &cmd->body.cid,
2343 				NULL);
2344 	if (unlikely(ret != 0))
2345 		return ret;
2346 
2347 	if (dev_priv->has_mob)
2348 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2349 
2350 	return 0;
2351 }
2352 
2353 /**
2354  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2355  * command
2356  *
2357  * @dev_priv: Pointer to a device private struct.
2358  * @sw_context: The software context being used for this batch.
2359  * @header: Pointer to the command header in the command stream.
2360  */
2361 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2362 				  struct vmw_sw_context *sw_context,
2363 				  SVGA3dCmdHeader *header)
2364 {
2365 	struct vmw_bind_gb_shader_cmd {
2366 		SVGA3dCmdHeader header;
2367 		SVGA3dCmdBindGBShader body;
2368 	} *cmd;
2369 
2370 	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2371 			   header);
2372 
2373 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2374 				     user_shader_converter,
2375 				     &cmd->body.shid, &cmd->body.mobid,
2376 				     cmd->body.offsetInBytes);
2377 }
2378 
2379 /**
2380  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2381  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2382  *
2383  * @dev_priv: Pointer to a device private struct.
2384  * @sw_context: The software context being used for this batch.
2385  * @header: Pointer to the command header in the command stream.
2386  */
2387 static int
2388 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2389 				      struct vmw_sw_context *sw_context,
2390 				      SVGA3dCmdHeader *header)
2391 {
2392 	struct {
2393 		SVGA3dCmdHeader header;
2394 		SVGA3dCmdDXSetSingleConstantBuffer body;
2395 	} *cmd;
2396 	struct vmw_resource_val_node *res_node = NULL;
2397 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2398 	struct vmw_ctx_bindinfo_cb binding;
2399 	int ret;
2400 
2401 	if (unlikely(ctx_node == NULL)) {
2402 		DRM_ERROR("DX Context not set.\n");
2403 		return -EINVAL;
2404 	}
2405 
2406 	cmd = container_of(header, typeof(*cmd), header);
2407 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2408 				user_surface_converter,
2409 				&cmd->body.sid, &res_node);
2410 	if (unlikely(ret != 0))
2411 		return ret;
2412 
2413 	binding.bi.ctx = ctx_node->res;
2414 	binding.bi.res = res_node ? res_node->res : NULL;
2415 	binding.bi.bt = vmw_ctx_binding_cb;
2416 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2417 	binding.offset = cmd->body.offsetInBytes;
2418 	binding.size = cmd->body.sizeInBytes;
2419 	binding.slot = cmd->body.slot;
2420 
2421 	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2422 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2423 		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2424 			  (unsigned) cmd->body.type,
2425 			  (unsigned) binding.slot);
2426 		return -EINVAL;
2427 	}
2428 
2429 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2430 			binding.shader_slot, binding.slot);
2431 
2432 	return 0;
2433 }
2434 
2435 /**
2436  * vmw_cmd_dx_set_shader_res - Validate an
2437  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2438  *
2439  * @dev_priv: Pointer to a device private struct.
2440  * @sw_context: The software context being used for this batch.
2441  * @header: Pointer to the command header in the command stream.
2442  */
2443 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2444 				     struct vmw_sw_context *sw_context,
2445 				     SVGA3dCmdHeader *header)
2446 {
2447 	struct {
2448 		SVGA3dCmdHeader header;
2449 		SVGA3dCmdDXSetShaderResources body;
2450 	} *cmd = container_of(header, typeof(*cmd), header);
2451 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2452 		sizeof(SVGA3dShaderResourceViewId);
2453 
2454 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2455 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2456 	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2457 		DRM_ERROR("Invalid shader binding.\n");
2458 		return -EINVAL;
2459 	}
2460 
2461 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2462 				     vmw_ctx_binding_sr,
2463 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2464 				     (void *) &cmd[1], num_sr_view,
2465 				     cmd->body.startView);
2466 }
2467 
2468 /**
2469  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2470  * command
2471  *
2472  * @dev_priv: Pointer to a device private struct.
2473  * @sw_context: The software context being used for this batch.
2474  * @header: Pointer to the command header in the command stream.
2475  */
2476 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2477 				 struct vmw_sw_context *sw_context,
2478 				 SVGA3dCmdHeader *header)
2479 {
2480 	struct {
2481 		SVGA3dCmdHeader header;
2482 		SVGA3dCmdDXSetShader body;
2483 	} *cmd;
2484 	struct vmw_resource *res = NULL;
2485 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2486 	struct vmw_ctx_bindinfo_shader binding;
2487 	int ret = 0;
2488 
2489 	if (unlikely(ctx_node == NULL)) {
2490 		DRM_ERROR("DX Context not set.\n");
2491 		return -EINVAL;
2492 	}
2493 
2494 	cmd = container_of(header, typeof(*cmd), header);
2495 
2496 	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2497 		DRM_ERROR("Illegal shader type %u.\n",
2498 			  (unsigned) cmd->body.type);
2499 		return -EINVAL;
2500 	}
2501 
2502 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2503 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2504 		if (IS_ERR(res)) {
2505 			DRM_ERROR("Could not find shader for binding.\n");
2506 			return PTR_ERR(res);
2507 		}
2508 
2509 		ret = vmw_resource_val_add(sw_context, res, NULL);
2510 		if (ret)
2511 			goto out_unref;
2512 	}
2513 
2514 	binding.bi.ctx = ctx_node->res;
2515 	binding.bi.res = res;
2516 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2517 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2518 
2519 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2520 			binding.shader_slot, 0);
2521 out_unref:
2522 	if (res)
2523 		vmw_resource_unreference(&res);
2524 
2525 	return ret;
2526 }
2527 
2528 /**
2529  * vmw_cmd_dx_set_vertex_buffers - Validate an
2530  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2531  *
2532  * @dev_priv: Pointer to a device private struct.
2533  * @sw_context: The software context being used for this batch.
2534  * @header: Pointer to the command header in the command stream.
2535  */
2536 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2537 					 struct vmw_sw_context *sw_context,
2538 					 SVGA3dCmdHeader *header)
2539 {
2540 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2541 	struct vmw_ctx_bindinfo_vb binding;
2542 	struct vmw_resource_val_node *res_node;
2543 	struct {
2544 		SVGA3dCmdHeader header;
2545 		SVGA3dCmdDXSetVertexBuffers body;
2546 		SVGA3dVertexBuffer buf[];
2547 	} *cmd;
2548 	int i, ret, num;
2549 
2550 	if (unlikely(ctx_node == NULL)) {
2551 		DRM_ERROR("DX Context not set.\n");
2552 		return -EINVAL;
2553 	}
2554 
2555 	cmd = container_of(header, typeof(*cmd), header);
2556 	num = (cmd->header.size - sizeof(cmd->body)) /
2557 		sizeof(SVGA3dVertexBuffer);
2558 	if ((u64)num + (u64)cmd->body.startBuffer >
2559 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2560 		DRM_ERROR("Invalid number of vertex buffers.\n");
2561 		return -EINVAL;
2562 	}
2563 
2564 	for (i = 0; i < num; i++) {
2565 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2566 					user_surface_converter,
2567 					&cmd->buf[i].sid, &res_node);
2568 		if (unlikely(ret != 0))
2569 			return ret;
2570 
2571 		binding.bi.ctx = ctx_node->res;
2572 		binding.bi.bt = vmw_ctx_binding_vb;
2573 		binding.bi.res = ((res_node) ? res_node->res : NULL);
2574 		binding.offset = cmd->buf[i].offset;
2575 		binding.stride = cmd->buf[i].stride;
2576 		binding.slot = i + cmd->body.startBuffer;
2577 
2578 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2579 				0, binding.slot);
2580 	}
2581 
2582 	return 0;
2583 }
2584 
2585 /**
2586  * vmw_cmd_dx_set_index_buffer - Validate an
2587  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2588  *
2589  * @dev_priv: Pointer to a device private struct.
2590  * @sw_context: The software context being used for this batch.
2591  * @header: Pointer to the command header in the command stream.
2592  */
2593 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2594 				       struct vmw_sw_context *sw_context,
2595 				       SVGA3dCmdHeader *header)
2596 {
2597 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2598 	struct vmw_ctx_bindinfo_ib binding;
2599 	struct vmw_resource_val_node *res_node;
2600 	struct {
2601 		SVGA3dCmdHeader header;
2602 		SVGA3dCmdDXSetIndexBuffer body;
2603 	} *cmd;
2604 	int ret;
2605 
2606 	if (unlikely(ctx_node == NULL)) {
2607 		DRM_ERROR("DX Context not set.\n");
2608 		return -EINVAL;
2609 	}
2610 
2611 	cmd = container_of(header, typeof(*cmd), header);
2612 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2613 				user_surface_converter,
2614 				&cmd->body.sid, &res_node);
2615 	if (unlikely(ret != 0))
2616 		return ret;
2617 
2618 	binding.bi.ctx = ctx_node->res;
2619 	binding.bi.res = ((res_node) ? res_node->res : NULL);
2620 	binding.bi.bt = vmw_ctx_binding_ib;
2621 	binding.offset = cmd->body.offset;
2622 	binding.format = cmd->body.format;
2623 
2624 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2625 
2626 	return 0;
2627 }
2628 
2629 /**
2630  * vmw_cmd_dx_set_rendertargets - Validate an
2631  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2632  *
2633  * @dev_priv: Pointer to a device private struct.
2634  * @sw_context: The software context being used for this batch.
2635  * @header: Pointer to the command header in the command stream.
2636  */
2637 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2638 					struct vmw_sw_context *sw_context,
2639 					SVGA3dCmdHeader *header)
2640 {
2641 	struct {
2642 		SVGA3dCmdHeader header;
2643 		SVGA3dCmdDXSetRenderTargets body;
2644 	} *cmd = container_of(header, typeof(*cmd), header);
2645 	int ret;
2646 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2647 		sizeof(SVGA3dRenderTargetViewId);
2648 
2649 	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2650 		DRM_ERROR("Invalid DX Rendertarget binding.\n");
2651 		return -EINVAL;
2652 	}
2653 
2654 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2655 				    vmw_ctx_binding_ds, 0,
2656 				    &cmd->body.depthStencilViewId, 1, 0);
2657 	if (ret)
2658 		return ret;
2659 
2660 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2661 				     vmw_ctx_binding_dx_rt, 0,
2662 				     (void *)&cmd[1], num_rt_view, 0);
2663 }
2664 
2665 /**
2666  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2667  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2668  *
2669  * @dev_priv: Pointer to a device private struct.
2670  * @sw_context: The software context being used for this batch.
2671  * @header: Pointer to the command header in the command stream.
2672  */
2673 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2674 					      struct vmw_sw_context *sw_context,
2675 					      SVGA3dCmdHeader *header)
2676 {
2677 	struct {
2678 		SVGA3dCmdHeader header;
2679 		SVGA3dCmdDXClearRenderTargetView body;
2680 	} *cmd = container_of(header, typeof(*cmd), header);
2681 
2682 	return vmw_view_id_val_add(sw_context, vmw_view_rt,
2683 				   cmd->body.renderTargetViewId);
2684 }
2685 
2686 /**
2687  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2688  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2689  *
2690  * @dev_priv: Pointer to a device private struct.
2691  * @sw_context: The software context being used for this batch.
2692  * @header: Pointer to the command header in the command stream.
2693  */
2694 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2695 					      struct vmw_sw_context *sw_context,
2696 					      SVGA3dCmdHeader *header)
2697 {
2698 	struct {
2699 		SVGA3dCmdHeader header;
2700 		SVGA3dCmdDXClearDepthStencilView body;
2701 	} *cmd = container_of(header, typeof(*cmd), header);
2702 
2703 	return vmw_view_id_val_add(sw_context, vmw_view_ds,
2704 				   cmd->body.depthStencilViewId);
2705 }
2706 
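/**
 * vmw_cmd_dx_view_define - Validate an SVGA_3D_CMD_DX_DEFINE_*_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */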
2707 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2708 				  struct vmw_sw_context *sw_context,
2709 				  SVGA3dCmdHeader *header)
2710 {
2711 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2712 	struct vmw_resource_val_node *srf_node;
2713 	struct vmw_resource *res;
2714 	enum vmw_view_type view_type;
2715 	int ret;
2716 	/*
2717 	 * This is based on the fact that all affected define commands have
2718 	 * the same initial command body layout.
2719 	 */
2720 	struct {
2721 		SVGA3dCmdHeader header;
2722 		uint32 defined_id;
2723 		uint32 sid;
2724 	} *cmd;
2725 
2726 	if (unlikely(ctx_node == NULL)) {
2727 		DRM_ERROR("DX Context not set.\n");
2728 		return -EINVAL;
2729 	}
2730 
2731 	view_type = vmw_view_cmd_to_type(header->id);
2732 	cmd = container_of(header, typeof(*cmd), header);
2733 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2734 				user_surface_converter,
2735 				&cmd->sid, &srf_node);
2736 	if (unlikely(ret != 0))
2737 		return ret;
2738 
2739 	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2740 	ret = vmw_cotable_notify(res, cmd->defined_id);
2741 	vmw_resource_unreference(&res);
2742 	if (unlikely(ret != 0))
2743 		return ret;
2744 
2745 	return vmw_view_add(sw_context->man,
2746 			    ctx_node->res,
2747 			    srf_node->res,
2748 			    view_type,
2749 			    cmd->defined_id,
2750 			    header,
2751 			    header->size + sizeof(*header),
2752 			    &sw_context->staged_cmd_res);
2753 }
2754 
2755 /**
2756  * vmw_cmd_dx_set_so_targets - Validate an
2757  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2758  *
2759  * @dev_priv: Pointer to a device private struct.
2760  * @sw_context: The software context being used for this batch.
2761  * @header: Pointer to the command header in the command stream.
2762  */
2763 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2764 				     struct vmw_sw_context *sw_context,
2765 				     SVGA3dCmdHeader *header)
2766 {
2767 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2768 	struct vmw_ctx_bindinfo_so binding;
2769 	struct vmw_resource_val_node *res_node;
2770 	struct {
2771 		SVGA3dCmdHeader header;
2772 		SVGA3dCmdDXSetSOTargets body;
2773 		SVGA3dSoTarget targets[];
2774 	} *cmd;
2775 	int i, ret, num;
2776 
2777 	if (unlikely(ctx_node == NULL)) {
2778 		DRM_ERROR("DX Context not set.\n");
2779 		return -EINVAL;
2780 	}
2781 
2782 	cmd = container_of(header, typeof(*cmd), header);
2783 	num = (cmd->header.size - sizeof(cmd->body)) /
2784 		sizeof(SVGA3dSoTarget);
2785 
2786 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2787 		DRM_ERROR("Invalid DX SO binding.\n");
2788 		return -EINVAL;
2789 	}
2790 
2791 	for (i = 0; i < num; i++) {
2792 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2793 					user_surface_converter,
2794 					&cmd->targets[i].sid, &res_node);
2795 		if (unlikely(ret != 0))
2796 			return ret;
2797 
2798 		binding.bi.ctx = ctx_node->res;
2799 		binding.bi.res = ((res_node) ? res_node->res : NULL);
2800 		binding.bi.bt = vmw_ctx_binding_so;
2801 		binding.offset = cmd->targets[i].offset;
2802 		binding.size = cmd->targets[i].sizeInBytes;
2803 		binding.slot = i;
2804 
2805 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2806 				0, binding.slot);
2807 	}
2808 
2809 	return 0;
2810 }
2811 
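/**
 * vmw_cmd_dx_so_define - Validate an SVGA_3D_CMD_DX_DEFINE_* command that
 * creates a DX state object backed by a cotable
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */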
2812 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2813 				struct vmw_sw_context *sw_context,
2814 				SVGA3dCmdHeader *header)
2815 {
2816 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2817 	struct vmw_resource *res;
2818 	/*
2819 	 * This is based on the fact that all affected define commands have
2820 	 * the same initial command body layout.
2821 	 */
2822 	struct {
2823 		SVGA3dCmdHeader header;
2824 		uint32 defined_id;
2825 	} *cmd;
2826 	enum vmw_so_type so_type;
2827 	int ret;
2828 
2829 	if (unlikely(ctx_node == NULL)) {
2830 		DRM_ERROR("DX Context not set.\n");
2831 		return -EINVAL;
2832 	}
2833 
2834 	so_type = vmw_so_cmd_to_type(header->id);
2835 	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2836 	cmd = container_of(header, typeof(*cmd), header);
2837 	ret = vmw_cotable_notify(res, cmd->defined_id);
2838 	vmw_resource_unreference(&res);
2839 
2840 	return ret;
2841 }
2842 
2843 /**
2844  * vmw_cmd_dx_check_subresource - Validate an
2845  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2846  *
2847  * @dev_priv: Pointer to a device private struct.
2848  * @sw_context: The software context being used for this batch.
2849  * @header: Pointer to the command header in the command stream.
2850  */
2851 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2852 					struct vmw_sw_context *sw_context,
2853 					SVGA3dCmdHeader *header)
2854 {
2855 	struct {
2856 		SVGA3dCmdHeader header;
2857 		union {
2858 			SVGA3dCmdDXReadbackSubResource r_body;
2859 			SVGA3dCmdDXInvalidateSubResource i_body;
2860 			SVGA3dCmdDXUpdateSubResource u_body;
2861 			SVGA3dSurfaceId sid;
2862 		};
2863 	} *cmd;
2864 
2865 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2866 		     offsetof(typeof(*cmd), sid));
2867 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2868 		     offsetof(typeof(*cmd), sid));
2869 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2870 		     offsetof(typeof(*cmd), sid));
2871 
2872 	cmd = container_of(header, typeof(*cmd), header);
2873 
2874 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2875 				 user_surface_converter,
2876 				 &cmd->sid, NULL);
2877 }
2878 
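/**
 * vmw_cmd_dx_cid_check - Validate a command that only requires the current
 * DX context to be set
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */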
2879 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2880 				struct vmw_sw_context *sw_context,
2881 				SVGA3dCmdHeader *header)
2882 {
2883 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2884 
2885 	if (unlikely(ctx_node == NULL)) {
2886 		DRM_ERROR("DX Context not set.\n");
2887 		return -EINVAL;
2888 	}
2889 
2890 	return 0;
2891 }
2892 
2893 /**
2894  * vmw_cmd_dx_view_remove - Validate a view remove command and
2895  * schedule the view resource for removal.
2896  *
2897  * @dev_priv: Pointer to a device private struct.
2898  * @sw_context: The software context being used for this batch.
2899  * @header: Pointer to the command header in the command stream.
2900  *
2901  * Check that the view exists, and if it was not created using this
2902  * command batch, conditionally make this command a NOP.
2903  */
2904 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2905 				  struct vmw_sw_context *sw_context,
2906 				  SVGA3dCmdHeader *header)
2907 {
2908 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2909 	struct {
2910 		SVGA3dCmdHeader header;
2911 		union vmw_view_destroy body;
2912 	} *cmd = container_of(header, typeof(*cmd), header);
2913 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2914 	struct vmw_resource *view;
2915 	int ret;
2916 
2917 	if (!ctx_node) {
2918 		DRM_ERROR("DX Context not set.\n");
2919 		return -EINVAL;
2920 	}
2921 
2922 	ret = vmw_view_remove(sw_context->man,
2923 			      cmd->body.view_id, view_type,
2924 			      &sw_context->staged_cmd_res,
2925 			      &view);
2926 	if (ret || !view)
2927 		return ret;
2928 
2929 	/*
2930 	 * If the view wasn't created during this command batch, it might
2931 	 * have been removed due to a context swapout, so add a
2932 	 * relocation to conditionally make this command a NOP to avoid
2933 	 * device errors.
2934 	 */
2935 	return vmw_resource_relocation_add(&sw_context->res_relocations,
2936 					   view,
2937 					   vmw_ptr_diff(sw_context->buf_start,
2938 							&cmd->header.id),
2939 					   vmw_res_rel_cond_nop);
2940 }
2941 
2942 /**
2943  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2944  * command
2945  *
2946  * @dev_priv: Pointer to a device private struct.
2947  * @sw_context: The software context being used for this batch.
2948  * @header: Pointer to the command header in the command stream.
2949  */
2950 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2951 				    struct vmw_sw_context *sw_context,
2952 				    SVGA3dCmdHeader *header)
2953 {
2954 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2955 	struct vmw_resource *res;
2956 	struct {
2957 		SVGA3dCmdHeader header;
2958 		SVGA3dCmdDXDefineShader body;
2959 	} *cmd = container_of(header, typeof(*cmd), header);
2960 	int ret;
2961 
2962 	if (!ctx_node) {
2963 		DRM_ERROR("DX Context not set.\n");
2964 		return -EINVAL;
2965 	}
2966 
2967 	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2968 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2969 	vmw_resource_unreference(&res);
2970 	if (ret)
2971 		return ret;
2972 
2973 	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2974 				 cmd->body.shaderId, cmd->body.type,
2975 				 &sw_context->staged_cmd_res);
2976 }
2977 
2978 /**
2979  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2980  * command
2981  *
2982  * @dev_priv: Pointer to a device private struct.
2983  * @sw_context: The software context being used for this batch.
2984  * @header: Pointer to the command header in the command stream.
2985  */
2986 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2987 				     struct vmw_sw_context *sw_context,
2988 				     SVGA3dCmdHeader *header)
2989 {
2990 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2991 	struct {
2992 		SVGA3dCmdHeader header;
2993 		SVGA3dCmdDXDestroyShader body;
2994 	} *cmd = container_of(header, typeof(*cmd), header);
2995 	int ret;
2996 
2997 	if (!ctx_node) {
2998 		DRM_ERROR("DX Context not set.\n");
2999 		return -EINVAL;
3000 	}
3001 
3002 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3003 				&sw_context->staged_cmd_res);
3004 	if (ret)
3005 		DRM_ERROR("Could not find shader to remove.\n");
3006 
3007 	return ret;
3008 }
3009 
3010 /**
3011  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
3012  * command
3013  *
3014  * @dev_priv: Pointer to a device private struct.
3015  * @sw_context: The software context being used for this batch.
3016  * @header: Pointer to the command header in the command stream.
3017  */
3018 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3019 				  struct vmw_sw_context *sw_context,
3020 				  SVGA3dCmdHeader *header)
3021 {
3022 	struct vmw_resource_val_node *ctx_node;
3023 	struct vmw_resource_val_node *res_node;
3024 	struct vmw_resource *res;
3025 	struct {
3026 		SVGA3dCmdHeader header;
3027 		SVGA3dCmdDXBindShader body;
3028 	} *cmd = container_of(header, typeof(*cmd), header);
3029 	int ret;
3030 
3031 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
3032 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3033 					user_context_converter,
3034 					&cmd->body.cid, &ctx_node);
3035 		if (ret)
3036 			return ret;
3037 	} else {
3038 		ctx_node = sw_context->dx_ctx_node;
3039 		if (!ctx_node) {
3040 			DRM_ERROR("DX Context not set.\n");
3041 			return -EINVAL;
3042 		}
3043 	}
3044 
3045 	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3046 				cmd->body.shid, 0);
3047 	if (IS_ERR(res)) {
3048 		DRM_ERROR("Could not find shader to bind.\n");
3049 		return PTR_ERR(res);
3050 	}
3051 
3052 	ret = vmw_resource_val_add(sw_context, res, &res_node);
3053 	if (ret) {
3054 		DRM_ERROR("Error creating resource validation node.\n");
3055 		goto out_unref;
3056 	}
3057 
3058 
3059 	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3060 					&cmd->body.mobid,
3061 					cmd->body.offsetInBytes);
3062 out_unref:
3063 	vmw_resource_unreference(&res);
3064 
3065 	return ret;
3066 }
3067 
3068 /**
3069  * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3070  *
3071  * @dev_priv: Pointer to a device private struct.
3072  * @sw_context: The software context being used for this batch.
3073  * @header: Pointer to the command header in the command stream.
3074  */
3075 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3076 			      struct vmw_sw_context *sw_context,
3077 			      SVGA3dCmdHeader *header)
3078 {
3079 	struct {
3080 		SVGA3dCmdHeader header;
3081 		SVGA3dCmdDXGenMips body;
3082 	} *cmd = container_of(header, typeof(*cmd), header);
3083 
3084 	return vmw_view_id_val_add(sw_context, vmw_view_sr,
3085 				   cmd->body.shaderResourceViewId);
3086 }
3087 
3088 /**
3089  * vmw_cmd_dx_transfer_from_buffer -
3090  * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3091  *
3092  * @dev_priv: Pointer to a device private struct.
3093  * @sw_context: The software context being used for this batch.
3094  * @header: Pointer to the command header in the command stream.
3095  */
3096 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3097 					   struct vmw_sw_context *sw_context,
3098 					   SVGA3dCmdHeader *header)
3099 {
3100 	struct {
3101 		SVGA3dCmdHeader header;
3102 		SVGA3dCmdDXTransferFromBuffer body;
3103 	} *cmd = container_of(header, typeof(*cmd), header);
3104 	int ret;
3105 
3106 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3107 				user_surface_converter,
3108 				&cmd->body.srcSid, NULL);
3109 	if (ret != 0)
3110 		return ret;
3111 
3112 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3113 				 user_surface_converter,
3114 				 &cmd->body.destSid, NULL);
3115 }
3116 
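/**
 * vmw_cmd_check_not_3d - Validate a non-3D (2D SVGA FIFO) command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: the number of bytes remaining in the command stream.
 * Out: the size of the command actually parsed.
 */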
3117 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3118 				struct vmw_sw_context *sw_context,
3119 				void *buf, uint32_t *size)
3120 {
3121 	uint32_t size_remaining = *size;
3122 	uint32_t cmd_id;
3123 
3124 	cmd_id = ((uint32_t *)buf)[0];
3125 	switch (cmd_id) {
3126 	case SVGA_CMD_UPDATE:
3127 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3128 		break;
3129 	case SVGA_CMD_DEFINE_GMRFB:
3130 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3131 		break;
3132 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3133 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3134 		break;
3135 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3136 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3137 		break;
3138 	default:
3139 		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3140 		return -EINVAL;
3141 	}
3142 
3143 	if (*size > size_remaining) {
3144 		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
3145 			  cmd_id);
3146 		return -EINVAL;
3147 	}
3148 
3149 	if (unlikely(!sw_context->kernel)) {
3150 		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3151 		return -EPERM;
3152 	}
3153 
3154 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3155 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3156 
3157 	return 0;
3158 }
3159 
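/*
 * Per-command validation dispatch table. Each VMW_CMD_DEF() entry pairs an
 * SVGA 3D command id with its validator function and a set of flags; see the
 * VMW_CMD_DEF() definition for the exact meaning of the trailing booleans.
 */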
3160 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3161 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3162 		    false, false, false),
3163 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3164 		    false, false, false),
3165 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3166 		    true, false, false),
3167 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3168 		    true, false, false),
3169 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3170 		    true, false, false),
3171 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3172 		    false, false, false),
3173 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3174 		    false, false, false),
3175 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3176 		    true, false, false),
3177 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3178 		    true, false, false),
3179 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3180 		    true, false, false),
3181 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3182 		    &vmw_cmd_set_render_target_check, true, false, false),
3183 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3184 		    true, false, false),
3185 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3186 		    true, false, false),
3187 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3188 		    true, false, false),
3189 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3190 		    true, false, false),
3191 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3192 		    true, false, false),
3193 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3194 		    true, false, false),
3195 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3196 		    true, false, false),
3197 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3198 		    false, false, false),
3199 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3200 		    true, false, false),
3201 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3202 		    true, false, false),
3203 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3204 		    true, false, false),
3205 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3206 		    true, false, false),
3207 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3208 		    true, false, false),
3209 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3210 		    true, false, false),
3211 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3212 		    true, false, false),
3213 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3214 		    true, false, false),
3215 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3216 		    true, false, false),
3217 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3218 		    true, false, false),
3219 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3220 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3221 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3222 		    false, false, false),
3223 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3224 		    false, false, false),
3225 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3226 		    false, false, false),
3227 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3228 		    false, false, false),
3229 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3230 		    false, false, false),
3231 	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3232 		    false, false, false),
3233 	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3234 		    false, false, false),
3235 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3236 		    false, false, false),
3237 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3238 		    false, false, false),
3239 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3240 		    false, false, false),
3241 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3242 		    false, false, false),
3243 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3244 		    false, false, false),
3245 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3246 		    false, false, false),
3247 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3248 		    false, false, true),
3249 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3250 		    false, false, true),
3251 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3252 		    false, false, true),
3253 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3254 		    false, false, true),
3255 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3256 		    false, false, true),
3257 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3258 		    false, false, true),
3259 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3260 		    false, false, true),
3261 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3262 		    false, false, true),
3263 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3264 		    true, false, true),
3265 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3266 		    false, false, true),
3267 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3268 		    true, false, true),
3269 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3270 		    &vmw_cmd_update_gb_surface, true, false, true),
3271 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3272 		    &vmw_cmd_readback_gb_image, true, false, true),
3273 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3274 		    &vmw_cmd_readback_gb_surface, true, false, true),
3275 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3276 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3277 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3278 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3279 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3280 		    false, false, true),
3281 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3282 		    false, false, true),
3283 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3284 		    false, false, true),
3285 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3286 		    false, false, true),
3287 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3288 		    false, false, true),
3289 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3290 		    false, false, true),
3291 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3292 		    true, false, true),
3293 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3294 		    false, false, true),
3295 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3296 		    false, false, false),
3297 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3298 		    true, false, true),
3299 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3300 		    true, false, true),
3301 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3302 		    true, false, true),
3303 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3304 		    true, false, true),
3305 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3306 		    false, false, true),
3307 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3308 		    false, false, true),
3309 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3310 		    false, false, true),
3311 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3312 		    false, false, true),
3313 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3314 		    false, false, true),
3315 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3316 		    false, false, true),
3317 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3318 		    false, false, true),
3319 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3320 		    false, false, true),
3321 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3322 		    false, false, true),
3323 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3324 		    false, false, true),
3325 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3326 		    true, false, true),
3327 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3328 		    false, false, true),
3329 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3330 		    false, false, true),
3331 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3332 		    false, false, true),
3333 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3334 		    false, false, true),
3335 
3336 	/*
3337 	 * DX commands
3338 	 */
3339 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3340 		    false, false, true),
3341 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3342 		    false, false, true),
3343 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3344 		    false, false, true),
3345 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3346 		    false, false, true),
3347 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3348 		    false, false, true),
3349 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3350 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3351 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3352 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3353 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3354 		    true, false, true),
3355 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3356 		    true, false, true),
3357 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3358 		    true, false, true),
3359 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3360 		    true, false, true),
3361 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3362 		    true, false, true),
3363 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3364 		    &vmw_cmd_dx_cid_check, true, false, true),
3365 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3366 		    true, false, true),
3367 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3368 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3369 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3370 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3371 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3372 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3373 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3374 		    true, false, true),
3375 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3376 		    &vmw_cmd_dx_cid_check, true, false, true),
3377 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3378 		    &vmw_cmd_dx_cid_check, true, false, true),
3379 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3380 		    true, false, true),
3381 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3382 		    true, false, true),
3383 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3384 		    true, false, true),
3385 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3386 		    &vmw_cmd_dx_cid_check, true, false, true),
3387 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3388 		    true, false, true),
3389 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3390 		    true, false, true),
3391 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3392 		    true, false, true),
3393 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3394 		    true, false, true),
3395 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3396 		    true, false, true),
3397 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3398 		    true, false, true),
3399 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3400 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3401 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3402 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3403 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3404 		    true, false, true),
3405 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3406 		    true, false, true),
3407 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3408 		    &vmw_cmd_dx_check_subresource, true, false, true),
3409 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3410 		    &vmw_cmd_dx_check_subresource, true, false, true),
3411 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3412 		    &vmw_cmd_dx_check_subresource, true, false, true),
3413 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3414 		    &vmw_cmd_dx_view_define, true, false, true),
3415 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3416 		    &vmw_cmd_dx_view_remove, true, false, true),
3417 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3418 		    &vmw_cmd_dx_view_define, true, false, true),
3419 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3420 		    &vmw_cmd_dx_view_remove, true, false, true),
3421 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3422 		    &vmw_cmd_dx_view_define, true, false, true),
3423 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3424 		    &vmw_cmd_dx_view_remove, true, false, true),
3425 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3426 		    &vmw_cmd_dx_so_define, true, false, true),
3427 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3428 		    &vmw_cmd_dx_cid_check, true, false, true),
3429 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3430 		    &vmw_cmd_dx_so_define, true, false, true),
3431 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3432 		    &vmw_cmd_dx_cid_check, true, false, true),
3433 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3434 		    &vmw_cmd_dx_so_define, true, false, true),
3435 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3436 		    &vmw_cmd_dx_cid_check, true, false, true),
3437 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3438 		    &vmw_cmd_dx_so_define, true, false, true),
3439 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3440 		    &vmw_cmd_dx_cid_check, true, false, true),
3441 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3442 		    &vmw_cmd_dx_so_define, true, false, true),
3443 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3444 		    &vmw_cmd_dx_cid_check, true, false, true),
3445 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3446 		    &vmw_cmd_dx_define_shader, true, false, true),
3447 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3448 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3449 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3450 		    &vmw_cmd_dx_bind_shader, true, false, true),
3451 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3452 		    &vmw_cmd_dx_so_define, true, false, true),
3453 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3454 		    &vmw_cmd_dx_cid_check, true, false, true),
3455 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3456 		    true, false, true),
3457 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3458 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3459 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3460 		    &vmw_cmd_dx_cid_check, true, false, true),
3461 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3462 		    &vmw_cmd_dx_cid_check, true, false, true),
3463 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3464 		    &vmw_cmd_buffer_copy_check, true, false, true),
3465 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3466 		    &vmw_cmd_pred_copy_check, true, false, true),
3467 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3468 		    &vmw_cmd_dx_transfer_from_buffer,
3469 		    true, false, true),
3470 };
3471 
3472 static int vmw_cmd_check(struct vmw_private *dev_priv,
3473 			 struct vmw_sw_context *sw_context,
3474 			 void *buf, uint32_t *size)
3475 {
3476 	uint32_t cmd_id;
3477 	uint32_t size_remaining = *size;
3478 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3479 	int ret;
3480 	const struct vmw_cmd_entry *entry;
3481 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3482 
3483 	cmd_id = ((uint32_t *)buf)[0];
3484 	/* Handle any non-3D commands */
3485 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3486 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3487 
3488 
3489 	cmd_id = header->id;
3490 	*size = header->size + sizeof(SVGA3dCmdHeader);
3491 
3492 	cmd_id -= SVGA_3D_CMD_BASE;
3493 	if (unlikely(*size > size_remaining))
3494 		goto out_invalid;
3495 
3496 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3497 		goto out_invalid;
3498 
3499 	entry = &vmw_cmd_entries[cmd_id];
3500 	if (unlikely(!entry->func))
3501 		goto out_invalid;
3502 
3503 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3504 		goto out_privileged;
3505 
3506 	if (unlikely(entry->gb_disable && gb))
3507 		goto out_old;
3508 
3509 	if (unlikely(entry->gb_enable && !gb))
3510 		goto out_new;
3511 
3512 	ret = entry->func(dev_priv, sw_context, header);
3513 	if (unlikely(ret != 0))
3514 		goto out_invalid;
3515 
3516 	return 0;
3517 out_invalid:
3518 	DRM_ERROR("Invalid SVGA3D command: %d\n",
3519 		  cmd_id + SVGA_3D_CMD_BASE);
3520 	return -EINVAL;
3521 out_privileged:
3522 	DRM_ERROR("Privileged SVGA3D command: %d\n",
3523 		  cmd_id + SVGA_3D_CMD_BASE);
3524 	return -EPERM;
3525 out_old:
3526 	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3527 		  cmd_id + SVGA_3D_CMD_BASE);
3528 	return -EINVAL;
3529 out_new:
3530 	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3531 		  cmd_id + SVGA_3D_CMD_BASE);
3532 	return -EINVAL;
3533 }
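
/*
 * Illustration only (not referenced by the driver): the stream layout that
 * vmw_cmd_check() assumes. The first dword of each entry is the command id.
 * Ids below SVGA_CMD_MAX are non-3D commands; 3D commands start with an
 * SVGA3dCmdHeader followed immediately by the command body:
 *
 *	struct illustrative_svga3d_cmd {
 *		SVGA3dCmdHeader header;	// header.id and header.size
 *		u8 body[];		// header.size bytes of payload
 *	};
 *
 * which is why the consumed size reported back through *size is
 * header.size + sizeof(SVGA3dCmdHeader).
 */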
3534 
3535 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3536 			     struct vmw_sw_context *sw_context,
3537 			     void *buf,
3538 			     uint32_t size)
3539 {
3540 	int32_t cur_size = size;
3541 	int ret;
3542 
3543 	sw_context->buf_start = buf;
3544 
3545 	while (cur_size > 0) {
3546 		size = cur_size;
3547 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3548 		if (unlikely(ret != 0))
3549 			return ret;
3550 		buf = (void *)((unsigned long) buf + size);
3551 		cur_size -= size;
3552 	}
3553 
3554 	if (unlikely(cur_size != 0)) {
3555 		DRM_ERROR("Command verifier out of sync.\n");
3556 		return -EINVAL;
3557 	}
3558 
3559 	return 0;
3560 }
3561 
3562 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3563 {
3564 	sw_context->cur_reloc = 0;
3565 }
3566 
3567 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3568 {
3569 	uint32_t i;
3570 	struct vmw_relocation *reloc;
3571 	struct ttm_validate_buffer *validate;
3572 	struct ttm_buffer_object *bo;
3573 
3574 	for (i = 0; i < sw_context->cur_reloc; ++i) {
3575 		reloc = &sw_context->relocs[i];
3576 		validate = &sw_context->val_bufs[reloc->index].base;
3577 		bo = validate->bo;
3578 		switch (bo->mem.mem_type) {
3579 		case TTM_PL_VRAM:
3580 			reloc->location->offset += bo->offset;
3581 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3582 			break;
3583 		case VMW_PL_GMR:
3584 			reloc->location->gmrId = bo->mem.start;
3585 			break;
3586 		case VMW_PL_MOB:
3587 			*reloc->mob_loc = bo->mem.start;
3588 			break;
3589 		default:
3590 			BUG();
3591 		}
3592 	}
3593 	vmw_free_relocations(sw_context);
3594 }
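
/*
 * Example (sketch, for illustration): a DMA-style command embeds an
 * SVGAGuestPtr { gmrId, offset }. After validation, the pointer is patched
 * according to where the backing buffer object ended up:
 *
 *	VRAM:	ptr->gmrId  = SVGA_GMR_FRAMEBUFFER;
 *		ptr->offset += bo->offset;
 *	GMR:	ptr->gmrId  = bo->mem.start;	// GMR id, offset unchanged
 *	MOB:	*mob_id     = bo->mem.start;	// MOB id stored separately
 *
 * mirroring the three cases handled in vmw_apply_relocations() above.
 */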
3595 
3596 /**
3597  * vmw_resource_list_unreference - Free up a resource list and unreference
3598  * all resources referenced by it.
3599  *
3600  * @sw_context: The software context, used to handle any staged bindings
 * attached to the resources.
 * @list: The resource list.
3601  */
3602 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3603 					  struct list_head *list)
3604 {
3605 	struct vmw_resource_val_node *val, *val_next;
3606 
3607 	/*
3608 	 * Drop references to resources held during command submission.
3609 	 */
3610 
3611 	list_for_each_entry_safe(val, val_next, list, head) {
3612 		list_del_init(&val->head);
3613 		vmw_resource_unreference(&val->res);
3614 
3615 		if (val->staged_bindings) {
3616 			if (val->staged_bindings != sw_context->staged_bindings)
3617 				vmw_binding_state_free(val->staged_bindings);
3618 			else
3619 				sw_context->staged_bindings_inuse = false;
3620 			val->staged_bindings = NULL;
3621 		}
3622 
3623 		kfree(val);
3624 	}
3625 }
3626 
3627 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3628 {
3629 	struct vmw_validate_buffer *entry, *next;
3630 	struct vmw_resource_val_node *val;
3631 
3632 	/*
3633 	 * Drop references to DMA buffers held during command submission.
3634 	 */
3635 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3636 				 base.head) {
3637 		list_del(&entry->base.head);
3638 		ttm_bo_unref(&entry->base.bo);
3639 		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3640 		sw_context->cur_val_buf--;
3641 	}
3642 	BUG_ON(sw_context->cur_val_buf != 0);
3643 
3644 	list_for_each_entry(val, &sw_context->resource_list, head)
3645 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3646 }
3647 
3648 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3649 			       struct ttm_buffer_object *bo,
3650 			       bool interruptible,
3651 			       bool validate_as_mob)
3652 {
3653 	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3654 						  base);
3655 	int ret;
3656 
3657 	if (vbo->pin_count > 0)
3658 		return 0;
3659 
3660 	if (validate_as_mob)
3661 		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3662 				       false);
3663 
3664 	/*
3665 	 * Put BO in VRAM if there is space, otherwise as a GMR.
3666 	 * If there is no space in VRAM and GMR ids are all used up,
3667 	 * start evicting GMRs to make room. If the DMA buffer can't be
3668 	 * used as a GMR, this will return -ENOMEM.
3669 	 */
3670 
3671 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3672 			      false);
3673 	if (likely(ret == 0 || ret == -ERESTARTSYS))
3674 		return ret;
3675 
3676 	/*
3677 	 * If that failed, try VRAM again, this time evicting
3678 	 * previous contents.
3679 	 */
3680 
3681 	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3682 	return ret;
3683 }
3684 
3685 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3686 				struct vmw_sw_context *sw_context)
3687 {
3688 	struct vmw_validate_buffer *entry;
3689 	int ret;
3690 
3691 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3692 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3693 						 true,
3694 						 entry->validate_as_mob);
3695 		if (unlikely(ret != 0))
3696 			return ret;
3697 	}
3698 	return 0;
3699 }
3700 
3701 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3702 				 uint32_t size)
3703 {
3704 	if (likely(sw_context->cmd_bounce_size >= size))
3705 		return 0;
3706 
3707 	if (sw_context->cmd_bounce_size == 0)
3708 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3709 
3710 	while (sw_context->cmd_bounce_size < size) {
3711 		sw_context->cmd_bounce_size =
3712 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3713 				   (sw_context->cmd_bounce_size >> 1));
3714 	}
3715 
3716 	vfree(sw_context->cmd_bounce);
3717 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3718 
3719 	if (sw_context->cmd_bounce == NULL) {
3720 		DRM_ERROR("Failed to allocate command bounce buffer.\n");
3721 		sw_context->cmd_bounce_size = 0;
3722 		return -ENOMEM;
3723 	}
3724 
3725 	return 0;
3726 }
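
/*
 * Worked example (assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB and a
 * 4 KiB PAGE_SIZE): a 100 KiB (102400-byte) command batch grows the bounce
 * buffer as
 *
 *	32768 -> PAGE_ALIGN(32768 + 16384) = 49152
 *	49152 -> PAGE_ALIGN(49152 + 24576) = 73728
 *	73728 -> PAGE_ALIGN(73728 + 36864) = 110592	(>= 102400, done)
 *
 * so the buffer grows roughly geometrically and is reused by subsequent
 * submissions of similar size instead of being reallocated every time.
 */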
3727 
3728 /**
3729  * vmw_execbuf_fence_commands - create and submit a command stream fence
3730  *
3731  * Creates a fence object and submits a command stream marker.
3732  * If this fails for some reason, we sync the fifo and return a NULL fence
3733  * It is then safe to fence buffers with a NULL pointer.
3734  *
3735  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3736  * user-space handle for the fence is created; otherwise it is not.
 * A brief usage sketch follows the function body.
3737  */
3738 
3739 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3740 			       struct vmw_private *dev_priv,
3741 			       struct vmw_fence_obj **p_fence,
3742 			       uint32_t *p_handle)
3743 {
3744 	uint32_t sequence;
3745 	int ret;
3746 	bool synced = false;
3747 
3748 	/* p_handle implies file_priv. */
3749 	BUG_ON(p_handle != NULL && file_priv == NULL);
3750 
3751 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3752 	if (unlikely(ret != 0)) {
3753 		DRM_ERROR("Fence submission error. Syncing.\n");
3754 		synced = true;
3755 	}
3756 
3757 	if (p_handle != NULL)
3758 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3759 					    sequence, p_fence, p_handle);
3760 	else
3761 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3762 
3763 	if (unlikely(ret != 0 && !synced)) {
3764 		(void) vmw_fallback_wait(dev_priv, false, false,
3765 					 sequence, false,
3766 					 VMW_FENCE_WAIT_TIMEOUT);
3767 		*p_fence = NULL;
3768 	}
3769 
3770 	return 0;
3771 }
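
/*
 * Usage sketch: kernel-internal callers that don't need a user-space fence
 * handle pass NULL for both @file_priv and @p_handle, as in
 * __vmw_execbuf_release_pinned_bo() below:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	// fence may legitimately be NULL here; fencing buffers with a NULL
 *	// fence is safe because the fifo has then already been synced.
 *	if (fence)
 *		vmw_fence_obj_unreference(&fence);
 */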
3772 
3773 /**
3774  * vmw_execbuf_copy_fence_user - copy fence object information to
3775  * user-space.
3776  *
3777  * @dev_priv: Pointer to a vmw_private struct.
3778  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3779  * @ret: Return value from fence object creation.
3780  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3781  * which the information should be copied.
3782  * @fence: Pointer to the fence object.
3783  * @fence_handle: User-space fence handle.
3784  *
3785  * This function copies fence information to user-space. If copying fails,
3786  * the user-space struct drm_vmw_fence_rep::error member is hopefully
3787  * left untouched, and if it has been preloaded with -EFAULT by user-space,
3788  * the error will hopefully be detected.
3789  * Also, if copying fails, user-space will be unable to signal the fence
3790  * object, so we wait for it immediately and then unreference the
3791  * user-space reference. (A sketch of that convention follows the function.)
3792  */
3793 void
3794 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3795 			    struct vmw_fpriv *vmw_fp,
3796 			    int ret,
3797 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3798 			    struct vmw_fence_obj *fence,
3799 			    uint32_t fence_handle)
3800 {
3801 	struct drm_vmw_fence_rep fence_rep;
3802 
3803 	if (user_fence_rep == NULL)
3804 		return;
3805 
3806 	memset(&fence_rep, 0, sizeof(fence_rep));
3807 
3808 	fence_rep.error = ret;
3809 	if (ret == 0) {
3810 		BUG_ON(fence == NULL);
3811 
3812 		fence_rep.handle = fence_handle;
3813 		fence_rep.seqno = fence->base.seqno;
3814 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3815 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3816 	}
3817 
3818 	/*
3819 	 * copy_to_user errors will be detected by user space not
3820 	 * seeing fence_rep::error filled in. Typically
3821 	 * user-space would have pre-set that member to -EFAULT.
3822 	 */
3823 	ret = copy_to_user(user_fence_rep, &fence_rep,
3824 			   sizeof(fence_rep));
3825 
3826 	/*
3827 	 * User-space lost the fence object. We need to sync
3828 	 * and unreference the handle.
3829 	 */
3830 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3831 		ttm_ref_object_base_unref(vmw_fp->tfile,
3832 					  fence_handle, TTM_REF_USAGE);
3833 		DRM_ERROR("Fence copy error. Syncing.\n");
3834 		(void) vmw_fence_obj_wait(fence, false, false,
3835 					  VMW_FENCE_WAIT_TIMEOUT);
3836 	}
3837 }
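
/*
 * Sketch of the user-space convention assumed above (illustration only,
 * not kernel code): user-space pre-loads fence_rep::error with -EFAULT so
 * that a failed copy_to_user() in the kernel is still detectable:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (uintptr_t) &rep;
 *	// ... issue the DRM_VMW_EXECBUF ioctl ...
 *	if (rep.error != 0) {
 *		// No usable fence handle; the kernel has already waited
 *		// for the fence and dropped the user-space reference.
 *	}
 */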
3838 
3839 /**
3840  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3841  * the fifo.
3842  *
3843  * @dev_priv: Pointer to a device private structure.
3844  * @kernel_commands: Pointer to the unpatched command batch.
3845  * @command_size: Size of the unpatched command batch.
3846  * @sw_context: Structure holding the relocation lists.
3847  *
3848  * Side effects: If this function returns 0, then the command batch
3849  * pointed to by @kernel_commands will have been modified.
3850  */
3851 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3852 				   void *kernel_commands,
3853 				   u32 command_size,
3854 				   struct vmw_sw_context *sw_context)
3855 {
3856 	void *cmd;
3857 
3858 	if (sw_context->dx_ctx_node)
3859 		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3860 					  sw_context->dx_ctx_node->res->id);
3861 	else
3862 		cmd = vmw_fifo_reserve(dev_priv, command_size);
3863 	if (!cmd) {
3864 		DRM_ERROR("Failed reserving fifo space for commands.\n");
3865 		return -ENOMEM;
3866 	}
3867 
3868 	vmw_apply_relocations(sw_context);
3869 	memcpy(cmd, kernel_commands, command_size);
3870 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3871 	vmw_resource_relocations_free(&sw_context->res_relocations);
3872 	vmw_fifo_commit(dev_priv, command_size);
3873 
3874 	return 0;
3875 }
3876 
3877 /**
3878  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3879  * the command buffer manager.
3880  *
3881  * @dev_priv: Pointer to a device private structure.
3882  * @header: Opaque handle to the command buffer allocation.
3883  * @command_size: Size of the unpatched command batch.
3884  * @sw_context: Structure holding the relocation lists.
3885  *
3886  * Side effects: If this function returns 0, then the command buffer
3887  * represented by @header will have been modified.
3888  */
3889 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3890 				     struct vmw_cmdbuf_header *header,
3891 				     u32 command_size,
3892 				     struct vmw_sw_context *sw_context)
3893 {
3894 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3895 		  SVGA3D_INVALID_ID);
3896 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3897 				       id, false, header);
3898 
	/* vmw_cmdbuf_reserve() returns an error pointer on failure. */
	if (IS_ERR(cmd)) {
		DRM_ERROR("Failed reserving cmdbuf space for commands.\n");
		return PTR_ERR(cmd);
	}

3899 	vmw_apply_relocations(sw_context);
3900 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3901 	vmw_resource_relocations_free(&sw_context->res_relocations);
3902 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3903 
3904 	return 0;
3905 }
3906 
3907 /**
3908  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3909  * submission using a command buffer.
3910  *
3911  * @dev_priv: Pointer to a device private structure.
3912  * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel-space copy of the commands, or NULL.
 * If non-NULL, the command buffer manager is not used and this pointer is
 * returned as-is.
3913  * @command_size: Size of the unpatched command batch.
3914  * @header: Out parameter returning the opaque pointer to the command buffer.
3915  *
3916  * This function checks whether we can use the command buffer manager for
3917  * submission and if so, creates a command buffer of suitable size and
3918  * copies the user data into that buffer.
3919  *
3920  * On successful return, the function returns a pointer to the data in the
3921  * command buffer and *@header is set to non-NULL.
3922  * If command buffers could not be used, the function returns the value
3923  * of @kernel_commands passed in. That value may be NULL. In that case,
3924  * *@header is set to NULL.
3925  * If an error is encountered, the function returns an error pointer.
3926  * If the function is interrupted by a signal while sleeping, it returns
3927  * -ERESTARTSYS cast to an error pointer. A caller sketch follows below.
3928  */
3929 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3930 				void __user *user_commands,
3931 				void *kernel_commands,
3932 				u32 command_size,
3933 				struct vmw_cmdbuf_header **header)
3934 {
3935 	size_t cmdbuf_size;
3936 	int ret;
3937 
3938 	*header = NULL;
3939 	if (command_size > SVGA_CB_MAX_SIZE) {
3940 		DRM_ERROR("Command buffer is too large.\n");
3941 		return ERR_PTR(-EINVAL);
3942 	}
3943 
3944 	if (!dev_priv->cman || kernel_commands)
3945 		return kernel_commands;
3946 
3947 	/* If possible, add a little space for fencing. */
3948 	cmdbuf_size = command_size + 512;
3949 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3950 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3951 					   true, header);
3952 	if (IS_ERR(kernel_commands))
3953 		return kernel_commands;
3954 
3955 	ret = copy_from_user(kernel_commands, user_commands,
3956 			     command_size);
3957 	if (ret) {
3958 		DRM_ERROR("Failed copying commands.\n");
3959 		vmw_cmdbuf_header_free(*header);
3960 		*header = NULL;
3961 		return ERR_PTR(-EFAULT);
3962 	}
3963 
3964 	return kernel_commands;
3965 }
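
/*
 * Caller-contract sketch (this is what vmw_execbuf_process() below does):
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmds;
 *
 *	cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
 *				  command_size, &header);
 *	if (IS_ERR(cmds))
 *		return PTR_ERR(cmds);
 *	// header != NULL: cmds already holds the user commands; submit with
 *	//		   vmw_execbuf_submit_cmdbuf().
 *	// header == NULL: cmds == kernel_commands (possibly NULL); bounce
 *	//		   user commands if needed and submit via the fifo.
 */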
3966 
3967 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3968 				   struct vmw_sw_context *sw_context,
3969 				   uint32_t handle)
3970 {
3971 	struct vmw_resource_val_node *ctx_node;
3972 	struct vmw_resource *res;
3973 	int ret;
3974 
3975 	if (handle == SVGA3D_INVALID_ID)
3976 		return 0;
3977 
3978 	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3979 					      handle, user_context_converter,
3980 					      &res);
3981 	if (unlikely(ret != 0)) {
3982 		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
3983 			  (unsigned) handle);
3984 		return ret;
3985 	}
3986 
3987 	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3988 	if (unlikely(ret != 0))
3989 		goto out_err;
3990 
3991 	sw_context->dx_ctx_node = ctx_node;
3992 	sw_context->man = vmw_context_res_man(res);
3993 out_err:
3994 	vmw_resource_unreference(&res);
3995 	return ret;
3996 }
3997 
3998 int vmw_execbuf_process(struct drm_file *file_priv,
3999 			struct vmw_private *dev_priv,
4000 			void __user *user_commands,
4001 			void *kernel_commands,
4002 			uint32_t command_size,
4003 			uint64_t throttle_us,
4004 			uint32_t dx_context_handle,
4005 			struct drm_vmw_fence_rep __user *user_fence_rep,
4006 			struct vmw_fence_obj **out_fence)
4007 {
4008 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4009 	struct vmw_fence_obj *fence = NULL;
4010 	struct vmw_resource *error_resource;
4011 	struct list_head resource_list;
4012 	struct vmw_cmdbuf_header *header;
4013 	struct ww_acquire_ctx ticket;
4014 	uint32_t handle;
4015 	int ret;
4016 
4017 	if (throttle_us) {
4018 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
4019 				   throttle_us);
4020 
4021 		if (ret)
4022 			return ret;
4023 	}
4024 
4025 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4026 					     kernel_commands, command_size,
4027 					     &header);
4028 	if (IS_ERR(kernel_commands))
4029 		return PTR_ERR(kernel_commands);
4030 
4031 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4032 	if (ret) {
4033 		ret = -ERESTARTSYS;
4034 		goto out_free_header;
4035 	}
4036 
4037 	sw_context->kernel = false;
4038 	if (kernel_commands == NULL) {
4039 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4040 		if (unlikely(ret != 0))
4041 			goto out_unlock;
4042 
4043 
4044 		ret = copy_from_user(sw_context->cmd_bounce,
4045 				     user_commands, command_size);
4046 
4047 		if (unlikely(ret != 0)) {
4048 			ret = -EFAULT;
4049 			DRM_ERROR("Failed copying commands.\n");
4050 			goto out_unlock;
4051 		}
4052 		kernel_commands = sw_context->cmd_bounce;
4053 	} else if (!header)
4054 		sw_context->kernel = true;
4055 
4056 	sw_context->fp = vmw_fpriv(file_priv);
4057 	sw_context->cur_reloc = 0;
4058 	sw_context->cur_val_buf = 0;
4059 	INIT_LIST_HEAD(&sw_context->resource_list);
4060 	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
4061 	sw_context->cur_query_bo = dev_priv->pinned_bo;
4062 	sw_context->last_query_ctx = NULL;
4063 	sw_context->needs_post_query_barrier = false;
4064 	sw_context->dx_ctx_node = NULL;
4065 	sw_context->dx_query_mob = NULL;
4066 	sw_context->dx_query_ctx = NULL;
4067 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4068 	INIT_LIST_HEAD(&sw_context->validate_nodes);
4069 	INIT_LIST_HEAD(&sw_context->res_relocations);
4070 	if (sw_context->staged_bindings)
4071 		vmw_binding_state_reset(sw_context->staged_bindings);
4072 
4073 	if (!sw_context->res_ht_initialized) {
4074 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4075 		if (unlikely(ret != 0))
4076 			goto out_unlock;
4077 		sw_context->res_ht_initialized = true;
4078 	}
4079 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4080 	INIT_LIST_HEAD(&resource_list);
4081 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4082 	if (unlikely(ret != 0)) {
4083 		list_splice_init(&sw_context->ctx_resource_list,
4084 				 &sw_context->resource_list);
4085 		goto out_err_nores;
4086 	}
4087 
4088 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4089 				command_size);
4090 	/*
4091 	 * Merge the resource lists before checking the return status
4092 	 * from vmw_cmd_check_all so that all the open hashtabs will
4093 	 * be handled properly even if vmw_cmd_check_all fails.
4094 	 */
4095 	list_splice_init(&sw_context->ctx_resource_list,
4096 			 &sw_context->resource_list);
4097 
4098 	if (unlikely(ret != 0))
4099 		goto out_err_nores;
4100 
4101 	ret = vmw_resources_reserve(sw_context);
4102 	if (unlikely(ret != 0))
4103 		goto out_err_nores;
4104 
4105 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4106 				     true, NULL);
4107 	if (unlikely(ret != 0))
4108 		goto out_err_nores;
4109 
4110 	ret = vmw_validate_buffers(dev_priv, sw_context);
4111 	if (unlikely(ret != 0))
4112 		goto out_err;
4113 
4114 	ret = vmw_resources_validate(sw_context);
4115 	if (unlikely(ret != 0))
4116 		goto out_err;
4117 
4118 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4119 	if (unlikely(ret != 0)) {
4120 		ret = -ERESTARTSYS;
4121 		goto out_err;
4122 	}
4123 
4124 	if (dev_priv->has_mob) {
4125 		ret = vmw_rebind_contexts(sw_context);
4126 		if (unlikely(ret != 0))
4127 			goto out_unlock_binding;
4128 	}
4129 
4130 	if (!header) {
4131 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4132 					      command_size, sw_context);
4133 	} else {
4134 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4135 						sw_context);
4136 		header = NULL;
4137 	}
4138 	mutex_unlock(&dev_priv->binding_mutex);
4139 	if (ret)
4140 		goto out_err;
4141 
4142 	vmw_query_bo_switch_commit(dev_priv, sw_context);
4143 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4144 					 &fence,
4145 					 (user_fence_rep) ? &handle : NULL);
4146 	/*
4147 	 * This error is harmless, because if fence submission fails,
4148 	 * vmw_fifo_send_fence will sync. The error will be propagated to
4149 	 * user-space in @user_fence_rep.
4150 	 */
4151 
4152 	if (ret != 0)
4153 		DRM_ERROR("Fence submission error. Syncing.\n");
4154 
4155 	vmw_resources_unreserve(sw_context, false);
4156 
4157 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4158 				    (void *) fence);
4159 
4160 	if (unlikely(dev_priv->pinned_bo != NULL &&
4161 		     !dev_priv->query_cid_valid))
4162 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4163 
4164 	vmw_clear_validations(sw_context);
4165 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4166 				    user_fence_rep, fence, handle);
4167 
4168 	/* Don't unreference when handing fence out */
4169 	if (unlikely(out_fence != NULL)) {
4170 		*out_fence = fence;
4171 		fence = NULL;
4172 	} else if (likely(fence != NULL)) {
4173 		vmw_fence_obj_unreference(&fence);
4174 	}
4175 
4176 	list_splice_init(&sw_context->resource_list, &resource_list);
4177 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4178 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4179 
4180 	/*
4181 	 * Unreference resources outside of the cmdbuf_mutex to
4182 	 * avoid deadlocks in resource destruction paths.
4183 	 */
4184 	vmw_resource_list_unreference(sw_context, &resource_list);
4185 
4186 	return 0;
4187 
4188 out_unlock_binding:
4189 	mutex_unlock(&dev_priv->binding_mutex);
4190 out_err:
4191 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4192 out_err_nores:
4193 	vmw_resources_unreserve(sw_context, true);
4194 	vmw_resource_relocations_free(&sw_context->res_relocations);
4195 	vmw_free_relocations(sw_context);
4196 	vmw_clear_validations(sw_context);
4197 	if (unlikely(dev_priv->pinned_bo != NULL &&
4198 		     !dev_priv->query_cid_valid))
4199 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4200 out_unlock:
4201 	list_splice_init(&sw_context->resource_list, &resource_list);
4202 	error_resource = sw_context->error_resource;
4203 	sw_context->error_resource = NULL;
4204 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4205 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4206 
4207 	/*
4208 	 * Unreference resources outside of the cmdbuf_mutex to
4209 	 * avoid deadlocks in resource destruction paths.
4210 	 */
4211 	vmw_resource_list_unreference(sw_context, &resource_list);
4212 	if (unlikely(error_resource != NULL))
4213 		vmw_resource_unreference(&error_resource);
4214 out_free_header:
4215 	if (header)
4216 		vmw_cmdbuf_header_free(header);
4217 
4218 	return ret;
4219 }
4220 
4221 /**
4222  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4223  *
4224  * @dev_priv: The device private structure.
4225  *
4226  * This function is called to idle the fifo and unpin the query buffer
4227  * if the normal way to do this hits an error, which should typically be
4228  * extremely rare.
4229  */
4230 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4231 {
4232 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4233 
4234 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4235 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4236 	if (dev_priv->dummy_query_bo_pinned) {
4237 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4238 		dev_priv->dummy_query_bo_pinned = false;
4239 	}
4240 }
4241 
4242 
4243 /**
4244  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4245  * query bo.
4246  *
4247  * @dev_priv: The device private structure.
4248  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4249  * _after_ a query barrier that flushes all queries touching the current
4250  * buffer pointed to by @dev_priv->pinned_bo
4251  *
4252  * This function should be used to unpin the pinned query bo, or
4253  * as a query barrier when we need to make sure that all queries have
4254  * finished before the next fifo command. (For example on hardware
4255  * context destructions where the hardware may otherwise leak unfinished
4256  * queries).
4257  *
4258  * This function does not return any failure codes, but makes attempts
4259  * to do safe unpinning in case of errors.
4260  *
4261  * The function will synchronize on the previous query barrier, and will
4262  * thus not finish until that barrier has executed.
4263  *
4264  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4265  * before calling this function.
4266  */
4267 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4268 				     struct vmw_fence_obj *fence)
4269 {
4270 	int ret = 0;
4271 	struct list_head validate_list;
4272 	struct ttm_validate_buffer pinned_val, query_val;
4273 	struct vmw_fence_obj *lfence = NULL;
4274 	struct ww_acquire_ctx ticket;
4275 
4276 	if (dev_priv->pinned_bo == NULL)
4277 		goto out_unlock;
4278 
4279 	INIT_LIST_HEAD(&validate_list);
4280 
4281 	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4282 	pinned_val.shared = false;
4283 	list_add_tail(&pinned_val.head, &validate_list);
4284 
4285 	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4286 	query_val.shared = false;
4287 	list_add_tail(&query_val.head, &validate_list);
4288 
4289 	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4290 				     false, NULL);
4291 	if (unlikely(ret != 0)) {
4292 		vmw_execbuf_unpin_panic(dev_priv);
4293 		goto out_no_reserve;
4294 	}
4295 
4296 	if (dev_priv->query_cid_valid) {
4297 		BUG_ON(fence != NULL);
4298 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4299 		if (unlikely(ret != 0)) {
4300 			vmw_execbuf_unpin_panic(dev_priv);
4301 			goto out_no_emit;
4302 		}
4303 		dev_priv->query_cid_valid = false;
4304 	}
4305 
4306 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4307 	if (dev_priv->dummy_query_bo_pinned) {
4308 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4309 		dev_priv->dummy_query_bo_pinned = false;
4310 	}
4311 	if (fence == NULL) {
4312 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4313 						  NULL);
4314 		fence = lfence;
4315 	}
4316 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4317 	if (lfence != NULL)
4318 		vmw_fence_obj_unreference(&lfence);
4319 
4320 	ttm_bo_unref(&query_val.bo);
4321 	ttm_bo_unref(&pinned_val.bo);
4322 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4323 out_unlock:
4324 	return;
4325 
4326 out_no_emit:
4327 	ttm_eu_backoff_reservation(&ticket, &validate_list);
4328 out_no_reserve:
4329 	ttm_bo_unref(&query_val.bo);
4330 	ttm_bo_unref(&pinned_val.bo);
4331 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4332 }
4333 
4334 /**
4335  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4336  * query bo.
4337  *
4338  * @dev_priv: The device private structure.
4339  *
4340  * This function should be used to unpin the pinned query bo, or
4341  * as a query barrier when we need to make sure that all queries have
4342  * finished before the next fifo command. (For example on hardware
4343  * context destructions where the hardware may otherwise leak unfinished
4344  * queries).
4345  *
4346  * This function does not return any failure codes, but makes attempts
4347  * to do safe unpinning in case of errors.
4348  *
4349  * The function will synchronize on the previous query barrier, and will
4350  * thus not finish until that barrier has executed.
4351  */
4352 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4353 {
4354 	mutex_lock(&dev_priv->cmdbuf_mutex);
4355 	if (dev_priv->query_cid_valid)
4356 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4357 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4358 }
4359 
4360 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4361 		      struct drm_file *file_priv, size_t size)
4362 {
4363 	struct vmw_private *dev_priv = vmw_priv(dev);
4364 	struct drm_vmw_execbuf_arg arg;
4365 	int ret;
4366 	static const size_t copy_offset[] = {
4367 		offsetof(struct drm_vmw_execbuf_arg, context_handle),
4368 		sizeof(struct drm_vmw_execbuf_arg)};
4369 
4370 	if (unlikely(size < copy_offset[0])) {
4371 		DRM_ERROR("Invalid command size, ioctl %d\n",
4372 			  DRM_VMW_EXECBUF);
4373 		return -EINVAL;
4374 	}
4375 
4376 	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4377 		return -EFAULT;
4378 
4379 	/*
4380 	 * Extend the ioctl argument while
4381 	 * maintaining backwards compatibility:
4382 	 * We take different code paths depending on the value of
4383 	 * arg.version.
4384 	 */
4385 
4386 	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4387 		     arg.version == 0)) {
4388 		DRM_ERROR("Incorrect execbuf version.\n");
4389 		return -EINVAL;
4390 	}
4391 
4392 	if (arg.version > 1 &&
4393 	    copy_from_user(&arg.context_handle,
4394 			   (void __user *) (data + copy_offset[0]),
4395 			   copy_offset[arg.version - 1] -
4396 			   copy_offset[0]) != 0)
4397 		return -EFAULT;
4398 
4399 	switch (arg.version) {
4400 	case 1:
4401 		arg.context_handle = (uint32_t) -1;
4402 		break;
4403 	case 2:
4404 		if (arg.pad64 != 0) {
4405 			DRM_ERROR("Unused IOCTL data not set to zero.\n");
4406 			return -EINVAL;
4407 		}
4408 		break;
4409 	default:
4410 		break;
4411 	}
4412 
4413 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4414 	if (unlikely(ret != 0))
4415 		return ret;
4416 
4417 	ret = vmw_execbuf_process(file_priv, dev_priv,
4418 				  (void __user *)(unsigned long)arg.commands,
4419 				  NULL, arg.command_size, arg.throttle_us,
4420 				  arg.context_handle,
4421 				  (void __user *)(unsigned long)arg.fence_rep,
4422 				  NULL);
4423 	ttm_read_unlock(&dev_priv->reservation_sem);
4424 	if (unlikely(ret != 0))
4425 		return ret;
4426 
4427 	vmw_kms_cursor_post_execbuf(dev_priv);
4428 
4429 	return 0;
4430 }
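
/*
 * Note on the versioned argument copy above: each copy_offset[] entry marks
 * where the ioctl argument ended for that version, so only the bytes a
 * given user-space version actually supplies are copied in. A hypothetical
 * future version appending a new trailing member would extend the table,
 * e.g.
 *
 *	static const size_t copy_offset[] = {
 *		offsetof(struct drm_vmw_execbuf_arg, context_handle),
 *		offsetof(struct drm_vmw_execbuf_arg, hypothetical_new_member),
 *		sizeof(struct drm_vmw_execbuf_arg)};
 *
 * where "hypothetical_new_member" is a placeholder, not a real field.
 */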
4431