1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32 
33 #define VMW_RES_HT_ORDER 12
34 
35 /**
36  * struct vmw_resource_relocation - Relocation info for resources
37  *
38  * @head: List head for the software context's relocation list.
39  * @res: Non-ref-counted pointer to the resource.
40  * @offset: Offset of 4 byte entries into the command buffer where the
41  * id that needs fixup is located.
42  */
43 struct vmw_resource_relocation {
44 	struct list_head head;
45 	const struct vmw_resource *res;
46 	unsigned long offset;
47 };
48 
49 /**
50  * struct vmw_resource_val_node - Validation info for resources
51  *
52  * @head: List head for the software context's resource list.
53  * @hash: Hash entry for quick resouce to val_node lookup.
54  * @res: Ref-counted pointer to the resource.
55  * @switch_backup: Boolean whether to switch backup buffer on unreserve.
56  * @new_backup: Refcounted pointer to the new backup buffer.
57  * @staged_bindings: If @res is a context, tracks bindings set up during
58  * the command batch. Otherwise NULL.
59  * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
60  * @first_usage: Set to true the first time the resource is referenced in
61  * the command stream.
62  * @no_buffer_needed: Resources do not need to allocate buffer backup on
63  * reservation. The command stream will provide one.
64  */
65 struct vmw_resource_val_node {
66 	struct list_head head;
67 	struct drm_hash_item hash;
68 	struct vmw_resource *res;
69 	struct vmw_dma_buffer *new_backup;
70 	struct vmw_ctx_binding_state *staged_bindings;
71 	unsigned long new_backup_offset;
72 	bool first_usage;
73 	bool no_buffer_needed;
74 };
75 
76 /**
77  * struct vmw_cmd_entry - Describe a command for the verifier
78  *
79  * @user_allow: Whether allowed from the execbuf ioctl.
80  * @gb_disable: Whether disabled if guest-backed objects are available.
81  * @gb_enable: Whether enabled iff guest-backed objects are available.
82  */
83 struct vmw_cmd_entry {
84 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
85 		     SVGA3dCmdHeader *);
86 	bool user_allow;
87 	bool gb_disable;
88 	bool gb_enable;
89 };
90 
91 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
92 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93 				       (_gb_disable), (_gb_enable)}
94 
95 /**
96  * vmw_resource_unreserve - unreserve resources previously reserved for
97  * command submission.
98  *
99  * @list_head: list of resources to unreserve.
100  * @backoff: Whether command submission failed.
101  */
102 static void vmw_resource_list_unreserve(struct list_head *list,
103 					bool backoff)
104 {
105 	struct vmw_resource_val_node *val;
106 
107 	list_for_each_entry(val, list, head) {
108 		struct vmw_resource *res = val->res;
109 		struct vmw_dma_buffer *new_backup =
110 			backoff ? NULL : val->new_backup;
111 
112 		/*
113 		 * Transfer staged context bindings to the
114 		 * persistent context binding tracker.
115 		 */
116 		if (unlikely(val->staged_bindings)) {
117 			if (!backoff) {
118 				vmw_context_binding_state_transfer
119 					(val->res, val->staged_bindings);
120 			}
121 			kfree(val->staged_bindings);
122 			val->staged_bindings = NULL;
123 		}
124 		vmw_resource_unreserve(res, new_backup,
125 			val->new_backup_offset);
126 		vmw_dmabuf_unreference(&val->new_backup);
127 	}
128 }
129 
130 
131 /**
132  * vmw_resource_val_add - Add a resource to the software context's
133  * resource list if it's not already on it.
134  *
135  * @sw_context: Pointer to the software context.
136  * @res: Pointer to the resource.
137  * @p_node On successful return points to a valid pointer to a
138  * struct vmw_resource_val_node, if non-NULL on entry.
139  */
140 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141 				struct vmw_resource *res,
142 				struct vmw_resource_val_node **p_node)
143 {
144 	struct vmw_resource_val_node *node;
145 	struct drm_hash_item *hash;
146 	int ret;
147 
148 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
149 				    &hash) == 0)) {
150 		node = container_of(hash, struct vmw_resource_val_node, hash);
151 		node->first_usage = false;
152 		if (unlikely(p_node != NULL))
153 			*p_node = node;
154 		return 0;
155 	}
156 
157 	node = kzalloc(sizeof(*node), GFP_KERNEL);
158 	if (unlikely(node == NULL)) {
159 		DRM_ERROR("Failed to allocate a resource validation "
160 			  "entry.\n");
161 		return -ENOMEM;
162 	}
163 
164 	node->hash.key = (unsigned long) res;
165 	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
166 	if (unlikely(ret != 0)) {
167 		DRM_ERROR("Failed to initialize a resource validation "
168 			  "entry.\n");
169 		kfree(node);
170 		return ret;
171 	}
172 	list_add_tail(&node->head, &sw_context->resource_list);
173 	node->res = vmw_resource_reference(res);
174 	node->first_usage = true;
175 
176 	if (unlikely(p_node != NULL))
177 		*p_node = node;
178 
179 	return 0;
180 }
181 
182 /**
183  * vmw_resource_context_res_add - Put resources previously bound to a context on
184  * the validation list
185  *
186  * @dev_priv: Pointer to a device private structure
187  * @sw_context: Pointer to a software context used for this command submission
188  * @ctx: Pointer to the context resource
189  *
190  * This function puts all resources that were previously bound to @ctx on
191  * the resource validation list. This is part of the context state reemission
192  */
193 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
194 					struct vmw_sw_context *sw_context,
195 					struct vmw_resource *ctx)
196 {
197 	struct list_head *binding_list;
198 	struct vmw_ctx_binding *entry;
199 	int ret = 0;
200 	struct vmw_resource *res;
201 
202 	mutex_lock(&dev_priv->binding_mutex);
203 	binding_list = vmw_context_binding_list(ctx);
204 
205 	list_for_each_entry(entry, binding_list, ctx_list) {
206 		res = vmw_resource_reference_unless_doomed(entry->bi.res);
207 		if (unlikely(res == NULL))
208 			continue;
209 
210 		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
211 		vmw_resource_unreference(&res);
212 		if (unlikely(ret != 0))
213 			break;
214 	}
215 
216 	mutex_unlock(&dev_priv->binding_mutex);
217 	return ret;
218 }
219 
220 /**
221  * vmw_resource_relocation_add - Add a relocation to the relocation list
222  *
223  * @list: Pointer to head of relocation list.
224  * @res: The resource.
225  * @offset: Offset into the command buffer currently being parsed where the
226  * id that needs fixup is located. Granularity is 4 bytes.
227  */
228 static int vmw_resource_relocation_add(struct list_head *list,
229 				       const struct vmw_resource *res,
230 				       unsigned long offset)
231 {
232 	struct vmw_resource_relocation *rel;
233 
234 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
235 	if (unlikely(rel == NULL)) {
236 		DRM_ERROR("Failed to allocate a resource relocation.\n");
237 		return -ENOMEM;
238 	}
239 
240 	rel->res = res;
241 	rel->offset = offset;
242 	list_add_tail(&rel->head, list);
243 
244 	return 0;
245 }
246 
247 /**
248  * vmw_resource_relocations_free - Free all relocations on a list
249  *
250  * @list: Pointer to the head of the relocation list.
251  */
252 static void vmw_resource_relocations_free(struct list_head *list)
253 {
254 	struct vmw_resource_relocation *rel, *n;
255 
256 	list_for_each_entry_safe(rel, n, list, head) {
257 		list_del(&rel->head);
258 		kfree(rel);
259 	}
260 }
261 
262 /**
263  * vmw_resource_relocations_apply - Apply all relocations on a list
264  *
265  * @cb: Pointer to the start of the command buffer bein patch. This need
266  * not be the same buffer as the one being parsed when the relocation
267  * list was built, but the contents must be the same modulo the
268  * resource ids.
269  * @list: Pointer to the head of the relocation list.
270  */
271 static void vmw_resource_relocations_apply(uint32_t *cb,
272 					   struct list_head *list)
273 {
274 	struct vmw_resource_relocation *rel;
275 
276 	list_for_each_entry(rel, list, head) {
277 		if (likely(rel->res != NULL))
278 			cb[rel->offset] = rel->res->id;
279 		else
280 			cb[rel->offset] = SVGA_3D_CMD_NOP;
281 	}
282 }
283 
284 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
285 			   struct vmw_sw_context *sw_context,
286 			   SVGA3dCmdHeader *header)
287 {
288 	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
289 }
290 
291 static int vmw_cmd_ok(struct vmw_private *dev_priv,
292 		      struct vmw_sw_context *sw_context,
293 		      SVGA3dCmdHeader *header)
294 {
295 	return 0;
296 }
297 
298 /**
299  * vmw_bo_to_validate_list - add a bo to a validate list
300  *
301  * @sw_context: The software context used for this command submission batch.
302  * @bo: The buffer object to add.
303  * @validate_as_mob: Validate this buffer as a MOB.
304  * @p_val_node: If non-NULL Will be updated with the validate node number
305  * on return.
306  *
307  * Returns -EINVAL if the limit of number of buffer objects per command
308  * submission is reached.
309  */
310 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
311 				   struct ttm_buffer_object *bo,
312 				   bool validate_as_mob,
313 				   uint32_t *p_val_node)
314 {
315 	uint32_t val_node;
316 	struct vmw_validate_buffer *vval_buf;
317 	struct ttm_validate_buffer *val_buf;
318 	struct drm_hash_item *hash;
319 	int ret;
320 
321 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
322 				    &hash) == 0)) {
323 		vval_buf = container_of(hash, struct vmw_validate_buffer,
324 					hash);
325 		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
326 			DRM_ERROR("Inconsistent buffer usage.\n");
327 			return -EINVAL;
328 		}
329 		val_buf = &vval_buf->base;
330 		val_node = vval_buf - sw_context->val_bufs;
331 	} else {
332 		val_node = sw_context->cur_val_buf;
333 		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
334 			DRM_ERROR("Max number of DMA buffers per submission "
335 				  "exceeded.\n");
336 			return -EINVAL;
337 		}
338 		vval_buf = &sw_context->val_bufs[val_node];
339 		vval_buf->hash.key = (unsigned long) bo;
340 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
341 		if (unlikely(ret != 0)) {
342 			DRM_ERROR("Failed to initialize a buffer validation "
343 				  "entry.\n");
344 			return ret;
345 		}
346 		++sw_context->cur_val_buf;
347 		val_buf = &vval_buf->base;
348 		val_buf->bo = ttm_bo_reference(bo);
349 		val_buf->reserved = false;
350 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
351 		vval_buf->validate_as_mob = validate_as_mob;
352 	}
353 
354 	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
355 
356 	if (p_val_node)
357 		*p_val_node = val_node;
358 
359 	return 0;
360 }
361 
362 /**
363  * vmw_resources_reserve - Reserve all resources on the sw_context's
364  * resource list.
365  *
366  * @sw_context: Pointer to the software context.
367  *
368  * Note that since vmware's command submission currently is protected by
369  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
370  * since only a single thread at once will attempt this.
371  */
372 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
373 {
374 	struct vmw_resource_val_node *val;
375 	int ret;
376 
377 	list_for_each_entry(val, &sw_context->resource_list, head) {
378 		struct vmw_resource *res = val->res;
379 
380 		ret = vmw_resource_reserve(res, val->no_buffer_needed);
381 		if (unlikely(ret != 0))
382 			return ret;
383 
384 		if (res->backup) {
385 			struct ttm_buffer_object *bo = &res->backup->base;
386 
387 			ret = vmw_bo_to_validate_list
388 				(sw_context, bo,
389 				 vmw_resource_needs_backup(res), NULL);
390 
391 			if (unlikely(ret != 0))
392 				return ret;
393 		}
394 	}
395 	return 0;
396 }
397 
398 /**
399  * vmw_resources_validate - Validate all resources on the sw_context's
400  * resource list.
401  *
402  * @sw_context: Pointer to the software context.
403  *
404  * Before this function is called, all resource backup buffers must have
405  * been validated.
406  */
407 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
408 {
409 	struct vmw_resource_val_node *val;
410 	int ret;
411 
412 	list_for_each_entry(val, &sw_context->resource_list, head) {
413 		struct vmw_resource *res = val->res;
414 
415 		ret = vmw_resource_validate(res);
416 		if (unlikely(ret != 0)) {
417 			if (ret != -ERESTARTSYS)
418 				DRM_ERROR("Failed to validate resource.\n");
419 			return ret;
420 		}
421 	}
422 	return 0;
423 }
424 
425 
426 /**
427  * vmw_cmd_res_reloc_add - Add a resource to a software context's
428  * relocation- and validation lists.
429  *
430  * @dev_priv: Pointer to a struct vmw_private identifying the device.
431  * @sw_context: Pointer to the software context.
432  * @res_type: Resource type.
433  * @id_loc: Pointer to where the id that needs translation is located.
434  * @res: Valid pointer to a struct vmw_resource.
435  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
436  * used for this resource is returned here.
437  */
438 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
439 				 struct vmw_sw_context *sw_context,
440 				 enum vmw_res_type res_type,
441 				 uint32_t *id_loc,
442 				 struct vmw_resource *res,
443 				 struct vmw_resource_val_node **p_val)
444 {
445 	int ret;
446 	struct vmw_resource_val_node *node;
447 
448 	*p_val = NULL;
449 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
450 					  res,
451 					  id_loc - sw_context->buf_start);
452 	if (unlikely(ret != 0))
453 		goto out_err;
454 
455 	ret = vmw_resource_val_add(sw_context, res, &node);
456 	if (unlikely(ret != 0))
457 		goto out_err;
458 
459 	if (res_type == vmw_res_context && dev_priv->has_mob &&
460 	    node->first_usage) {
461 
462 		/*
463 		 * Put contexts first on the list to be able to exit
464 		 * list traversal for contexts early.
465 		 */
466 		list_del(&node->head);
467 		list_add(&node->head, &sw_context->resource_list);
468 
469 		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
470 		if (unlikely(ret != 0))
471 			goto out_err;
472 		node->staged_bindings =
473 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
474 		if (node->staged_bindings == NULL) {
475 			DRM_ERROR("Failed to allocate context binding "
476 				  "information.\n");
477 			goto out_err;
478 		}
479 		INIT_LIST_HEAD(&node->staged_bindings->list);
480 	}
481 
482 	if (p_val)
483 		*p_val = node;
484 
485 out_err:
486 	return ret;
487 }
488 
489 
490 /**
491  * vmw_cmd_res_check - Check that a resource is present and if so, put it
492  * on the resource validate list unless it's already there.
493  *
494  * @dev_priv: Pointer to a device private structure.
495  * @sw_context: Pointer to the software context.
496  * @res_type: Resource type.
497  * @converter: User-space visisble type specific information.
498  * @id_loc: Pointer to the location in the command buffer currently being
499  * parsed from where the user-space resource id handle is located.
500  * @p_val: Pointer to pointer to resource validalidation node. Populated
501  * on exit.
502  */
503 static int
504 vmw_cmd_res_check(struct vmw_private *dev_priv,
505 		  struct vmw_sw_context *sw_context,
506 		  enum vmw_res_type res_type,
507 		  const struct vmw_user_resource_conv *converter,
508 		  uint32_t *id_loc,
509 		  struct vmw_resource_val_node **p_val)
510 {
511 	struct vmw_res_cache_entry *rcache =
512 		&sw_context->res_cache[res_type];
513 	struct vmw_resource *res;
514 	struct vmw_resource_val_node *node;
515 	int ret;
516 
517 	if (*id_loc == SVGA3D_INVALID_ID) {
518 		if (p_val)
519 			*p_val = NULL;
520 		if (res_type == vmw_res_context) {
521 			DRM_ERROR("Illegal context invalid id.\n");
522 			return -EINVAL;
523 		}
524 		return 0;
525 	}
526 
527 	/*
528 	 * Fastpath in case of repeated commands referencing the same
529 	 * resource
530 	 */
531 
532 	if (likely(rcache->valid && *id_loc == rcache->handle)) {
533 		const struct vmw_resource *res = rcache->res;
534 
535 		rcache->node->first_usage = false;
536 		if (p_val)
537 			*p_val = rcache->node;
538 
539 		return vmw_resource_relocation_add
540 			(&sw_context->res_relocations, res,
541 			 id_loc - sw_context->buf_start);
542 	}
543 
544 	ret = vmw_user_resource_lookup_handle(dev_priv,
545 					      sw_context->fp->tfile,
546 					      *id_loc,
547 					      converter,
548 					      &res);
549 	if (unlikely(ret != 0)) {
550 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
551 			  (unsigned) *id_loc);
552 		dump_stack();
553 		return ret;
554 	}
555 
556 	rcache->valid = true;
557 	rcache->res = res;
558 	rcache->handle = *id_loc;
559 
560 	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
561 				    res, &node);
562 	if (unlikely(ret != 0))
563 		goto out_no_reloc;
564 
565 	rcache->node = node;
566 	if (p_val)
567 		*p_val = node;
568 	vmw_resource_unreference(&res);
569 	return 0;
570 
571 out_no_reloc:
572 	BUG_ON(sw_context->error_resource != NULL);
573 	sw_context->error_resource = res;
574 
575 	return ret;
576 }
577 
578 /**
579  * vmw_rebind_contexts - Rebind all resources previously bound to
580  * referenced contexts.
581  *
582  * @sw_context: Pointer to the software context.
583  *
584  * Rebind context binding points that have been scrubbed because of eviction.
585  */
586 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
587 {
588 	struct vmw_resource_val_node *val;
589 	int ret;
590 
591 	list_for_each_entry(val, &sw_context->resource_list, head) {
592 		if (unlikely(!val->staged_bindings))
593 			break;
594 
595 		ret = vmw_context_rebind_all(val->res);
596 		if (unlikely(ret != 0)) {
597 			if (ret != -ERESTARTSYS)
598 				DRM_ERROR("Failed to rebind context.\n");
599 			return ret;
600 		}
601 	}
602 
603 	return 0;
604 }
605 
606 /**
607  * vmw_cmd_cid_check - Check a command header for valid context information.
608  *
609  * @dev_priv: Pointer to a device private structure.
610  * @sw_context: Pointer to the software context.
611  * @header: A command header with an embedded user-space context handle.
612  *
613  * Convenience function: Call vmw_cmd_res_check with the user-space context
614  * handle embedded in @header.
615  */
616 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
617 			     struct vmw_sw_context *sw_context,
618 			     SVGA3dCmdHeader *header)
619 {
620 	struct vmw_cid_cmd {
621 		SVGA3dCmdHeader header;
622 		uint32_t cid;
623 	} *cmd;
624 
625 	cmd = container_of(header, struct vmw_cid_cmd, header);
626 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
627 				 user_context_converter, &cmd->cid, NULL);
628 }
629 
630 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
631 					   struct vmw_sw_context *sw_context,
632 					   SVGA3dCmdHeader *header)
633 {
634 	struct vmw_sid_cmd {
635 		SVGA3dCmdHeader header;
636 		SVGA3dCmdSetRenderTarget body;
637 	} *cmd;
638 	struct vmw_resource_val_node *ctx_node;
639 	struct vmw_resource_val_node *res_node;
640 	int ret;
641 
642 	cmd = container_of(header, struct vmw_sid_cmd, header);
643 
644 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
645 				user_context_converter, &cmd->body.cid,
646 				&ctx_node);
647 	if (unlikely(ret != 0))
648 		return ret;
649 
650 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
651 				user_surface_converter,
652 				&cmd->body.target.sid, &res_node);
653 	if (unlikely(ret != 0))
654 		return ret;
655 
656 	if (dev_priv->has_mob) {
657 		struct vmw_ctx_bindinfo bi;
658 
659 		bi.ctx = ctx_node->res;
660 		bi.res = res_node ? res_node->res : NULL;
661 		bi.bt = vmw_ctx_binding_rt;
662 		bi.i1.rt_type = cmd->body.type;
663 		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
664 	}
665 
666 	return 0;
667 }
668 
669 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
670 				      struct vmw_sw_context *sw_context,
671 				      SVGA3dCmdHeader *header)
672 {
673 	struct vmw_sid_cmd {
674 		SVGA3dCmdHeader header;
675 		SVGA3dCmdSurfaceCopy body;
676 	} *cmd;
677 	int ret;
678 
679 	cmd = container_of(header, struct vmw_sid_cmd, header);
680 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
681 				user_surface_converter,
682 				&cmd->body.src.sid, NULL);
683 	if (unlikely(ret != 0))
684 		return ret;
685 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
686 				 user_surface_converter,
687 				 &cmd->body.dest.sid, NULL);
688 }
689 
690 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
691 				     struct vmw_sw_context *sw_context,
692 				     SVGA3dCmdHeader *header)
693 {
694 	struct vmw_sid_cmd {
695 		SVGA3dCmdHeader header;
696 		SVGA3dCmdSurfaceStretchBlt body;
697 	} *cmd;
698 	int ret;
699 
700 	cmd = container_of(header, struct vmw_sid_cmd, header);
701 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
702 				user_surface_converter,
703 				&cmd->body.src.sid, NULL);
704 	if (unlikely(ret != 0))
705 		return ret;
706 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
707 				 user_surface_converter,
708 				 &cmd->body.dest.sid, NULL);
709 }
710 
711 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
712 					 struct vmw_sw_context *sw_context,
713 					 SVGA3dCmdHeader *header)
714 {
715 	struct vmw_sid_cmd {
716 		SVGA3dCmdHeader header;
717 		SVGA3dCmdBlitSurfaceToScreen body;
718 	} *cmd;
719 
720 	cmd = container_of(header, struct vmw_sid_cmd, header);
721 
722 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
723 				 user_surface_converter,
724 				 &cmd->body.srcImage.sid, NULL);
725 }
726 
727 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
728 				 struct vmw_sw_context *sw_context,
729 				 SVGA3dCmdHeader *header)
730 {
731 	struct vmw_sid_cmd {
732 		SVGA3dCmdHeader header;
733 		SVGA3dCmdPresent body;
734 	} *cmd;
735 
736 
737 	cmd = container_of(header, struct vmw_sid_cmd, header);
738 
739 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
740 				 user_surface_converter, &cmd->body.sid,
741 				 NULL);
742 }
743 
744 /**
745  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
746  *
747  * @dev_priv: The device private structure.
748  * @new_query_bo: The new buffer holding query results.
749  * @sw_context: The software context used for this command submission.
750  *
751  * This function checks whether @new_query_bo is suitable for holding
752  * query results, and if another buffer currently is pinned for query
753  * results. If so, the function prepares the state of @sw_context for
754  * switching pinned buffers after successful submission of the current
755  * command batch.
756  */
757 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
758 				       struct ttm_buffer_object *new_query_bo,
759 				       struct vmw_sw_context *sw_context)
760 {
761 	struct vmw_res_cache_entry *ctx_entry =
762 		&sw_context->res_cache[vmw_res_context];
763 	int ret;
764 
765 	BUG_ON(!ctx_entry->valid);
766 	sw_context->last_query_ctx = ctx_entry->res;
767 
768 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
769 
770 		if (unlikely(new_query_bo->num_pages > 4)) {
771 			DRM_ERROR("Query buffer too large.\n");
772 			return -EINVAL;
773 		}
774 
775 		if (unlikely(sw_context->cur_query_bo != NULL)) {
776 			sw_context->needs_post_query_barrier = true;
777 			ret = vmw_bo_to_validate_list(sw_context,
778 						      sw_context->cur_query_bo,
779 						      dev_priv->has_mob, NULL);
780 			if (unlikely(ret != 0))
781 				return ret;
782 		}
783 		sw_context->cur_query_bo = new_query_bo;
784 
785 		ret = vmw_bo_to_validate_list(sw_context,
786 					      dev_priv->dummy_query_bo,
787 					      dev_priv->has_mob, NULL);
788 		if (unlikely(ret != 0))
789 			return ret;
790 
791 	}
792 
793 	return 0;
794 }
795 
796 
797 /**
798  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
799  *
800  * @dev_priv: The device private structure.
801  * @sw_context: The software context used for this command submission batch.
802  *
803  * This function will check if we're switching query buffers, and will then,
804  * issue a dummy occlusion query wait used as a query barrier. When the fence
805  * object following that query wait has signaled, we are sure that all
806  * preceding queries have finished, and the old query buffer can be unpinned.
807  * However, since both the new query buffer and the old one are fenced with
808  * that fence, we can do an asynchronus unpin now, and be sure that the
809  * old query buffer won't be moved until the fence has signaled.
810  *
811  * As mentioned above, both the new - and old query buffers need to be fenced
812  * using a sequence emitted *after* calling this function.
813  */
814 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
815 				     struct vmw_sw_context *sw_context)
816 {
817 	/*
818 	 * The validate list should still hold references to all
819 	 * contexts here.
820 	 */
821 
822 	if (sw_context->needs_post_query_barrier) {
823 		struct vmw_res_cache_entry *ctx_entry =
824 			&sw_context->res_cache[vmw_res_context];
825 		struct vmw_resource *ctx;
826 		int ret;
827 
828 		BUG_ON(!ctx_entry->valid);
829 		ctx = ctx_entry->res;
830 
831 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
832 
833 		if (unlikely(ret != 0))
834 			DRM_ERROR("Out of fifo space for dummy query.\n");
835 	}
836 
837 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
838 		if (dev_priv->pinned_bo) {
839 			vmw_bo_pin(dev_priv->pinned_bo, false);
840 			ttm_bo_unref(&dev_priv->pinned_bo);
841 		}
842 
843 		if (!sw_context->needs_post_query_barrier) {
844 			vmw_bo_pin(sw_context->cur_query_bo, true);
845 
846 			/*
847 			 * We pin also the dummy_query_bo buffer so that we
848 			 * don't need to validate it when emitting
849 			 * dummy queries in context destroy paths.
850 			 */
851 
852 			vmw_bo_pin(dev_priv->dummy_query_bo, true);
853 			dev_priv->dummy_query_bo_pinned = true;
854 
855 			BUG_ON(sw_context->last_query_ctx == NULL);
856 			dev_priv->query_cid = sw_context->last_query_ctx->id;
857 			dev_priv->query_cid_valid = true;
858 			dev_priv->pinned_bo =
859 				ttm_bo_reference(sw_context->cur_query_bo);
860 		}
861 	}
862 }
863 
864 /**
865  * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
866  * handle to a MOB id.
867  *
868  * @dev_priv: Pointer to a device private structure.
869  * @sw_context: The software context used for this command batch validation.
870  * @id: Pointer to the user-space handle to be translated.
871  * @vmw_bo_p: Points to a location that, on successful return will carry
872  * a reference-counted pointer to the DMA buffer identified by the
873  * user-space handle in @id.
874  *
875  * This function saves information needed to translate a user-space buffer
876  * handle to a MOB id. The translation does not take place immediately, but
877  * during a call to vmw_apply_relocations(). This function builds a relocation
878  * list and a list of buffers to validate. The former needs to be freed using
879  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
880  * needs to be freed using vmw_clear_validations.
881  */
882 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
883 				 struct vmw_sw_context *sw_context,
884 				 SVGAMobId *id,
885 				 struct vmw_dma_buffer **vmw_bo_p)
886 {
887 	struct vmw_dma_buffer *vmw_bo = NULL;
888 	struct ttm_buffer_object *bo;
889 	uint32_t handle = *id;
890 	struct vmw_relocation *reloc;
891 	int ret;
892 
893 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
894 	if (unlikely(ret != 0)) {
895 		DRM_ERROR("Could not find or use MOB buffer.\n");
896 		return -EINVAL;
897 	}
898 	bo = &vmw_bo->base;
899 
900 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
901 		DRM_ERROR("Max number relocations per submission"
902 			  " exceeded\n");
903 		ret = -EINVAL;
904 		goto out_no_reloc;
905 	}
906 
907 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
908 	reloc->mob_loc = id;
909 	reloc->location = NULL;
910 
911 	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
912 	if (unlikely(ret != 0))
913 		goto out_no_reloc;
914 
915 	*vmw_bo_p = vmw_bo;
916 	return 0;
917 
918 out_no_reloc:
919 	vmw_dmabuf_unreference(&vmw_bo);
920 	vmw_bo_p = NULL;
921 	return ret;
922 }
923 
924 /**
925  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
926  * handle to a valid SVGAGuestPtr
927  *
928  * @dev_priv: Pointer to a device private structure.
929  * @sw_context: The software context used for this command batch validation.
930  * @ptr: Pointer to the user-space handle to be translated.
931  * @vmw_bo_p: Points to a location that, on successful return will carry
932  * a reference-counted pointer to the DMA buffer identified by the
933  * user-space handle in @id.
934  *
935  * This function saves information needed to translate a user-space buffer
936  * handle to a valid SVGAGuestPtr. The translation does not take place
937  * immediately, but during a call to vmw_apply_relocations().
938  * This function builds a relocation list and a list of buffers to validate.
939  * The former needs to be freed using either vmw_apply_relocations() or
940  * vmw_free_relocations(). The latter needs to be freed using
941  * vmw_clear_validations.
942  */
943 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
944 				   struct vmw_sw_context *sw_context,
945 				   SVGAGuestPtr *ptr,
946 				   struct vmw_dma_buffer **vmw_bo_p)
947 {
948 	struct vmw_dma_buffer *vmw_bo = NULL;
949 	struct ttm_buffer_object *bo;
950 	uint32_t handle = ptr->gmrId;
951 	struct vmw_relocation *reloc;
952 	int ret;
953 
954 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
955 	if (unlikely(ret != 0)) {
956 		DRM_ERROR("Could not find or use GMR region.\n");
957 		return -EINVAL;
958 	}
959 	bo = &vmw_bo->base;
960 
961 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
962 		DRM_ERROR("Max number relocations per submission"
963 			  " exceeded\n");
964 		ret = -EINVAL;
965 		goto out_no_reloc;
966 	}
967 
968 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
969 	reloc->location = ptr;
970 
971 	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
972 	if (unlikely(ret != 0))
973 		goto out_no_reloc;
974 
975 	*vmw_bo_p = vmw_bo;
976 	return 0;
977 
978 out_no_reloc:
979 	vmw_dmabuf_unreference(&vmw_bo);
980 	vmw_bo_p = NULL;
981 	return ret;
982 }
983 
984 /**
985  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
986  *
987  * @dev_priv: Pointer to a device private struct.
988  * @sw_context: The software context used for this command submission.
989  * @header: Pointer to the command header in the command stream.
990  */
991 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
992 				  struct vmw_sw_context *sw_context,
993 				  SVGA3dCmdHeader *header)
994 {
995 	struct vmw_begin_gb_query_cmd {
996 		SVGA3dCmdHeader header;
997 		SVGA3dCmdBeginGBQuery q;
998 	} *cmd;
999 
1000 	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1001 			   header);
1002 
1003 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1004 				 user_context_converter, &cmd->q.cid,
1005 				 NULL);
1006 }
1007 
1008 /**
1009  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
1010  *
1011  * @dev_priv: Pointer to a device private struct.
1012  * @sw_context: The software context used for this command submission.
1013  * @header: Pointer to the command header in the command stream.
1014  */
1015 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1016 			       struct vmw_sw_context *sw_context,
1017 			       SVGA3dCmdHeader *header)
1018 {
1019 	struct vmw_begin_query_cmd {
1020 		SVGA3dCmdHeader header;
1021 		SVGA3dCmdBeginQuery q;
1022 	} *cmd;
1023 
1024 	cmd = container_of(header, struct vmw_begin_query_cmd,
1025 			   header);
1026 
1027 	if (unlikely(dev_priv->has_mob)) {
1028 		struct {
1029 			SVGA3dCmdHeader header;
1030 			SVGA3dCmdBeginGBQuery q;
1031 		} gb_cmd;
1032 
1033 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1034 
1035 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1036 		gb_cmd.header.size = cmd->header.size;
1037 		gb_cmd.q.cid = cmd->q.cid;
1038 		gb_cmd.q.type = cmd->q.type;
1039 
1040 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1041 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1042 	}
1043 
1044 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1045 				 user_context_converter, &cmd->q.cid,
1046 				 NULL);
1047 }
1048 
1049 /**
1050  * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
1051  *
1052  * @dev_priv: Pointer to a device private struct.
1053  * @sw_context: The software context used for this command submission.
1054  * @header: Pointer to the command header in the command stream.
1055  */
1056 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1057 				struct vmw_sw_context *sw_context,
1058 				SVGA3dCmdHeader *header)
1059 {
1060 	struct vmw_dma_buffer *vmw_bo;
1061 	struct vmw_query_cmd {
1062 		SVGA3dCmdHeader header;
1063 		SVGA3dCmdEndGBQuery q;
1064 	} *cmd;
1065 	int ret;
1066 
1067 	cmd = container_of(header, struct vmw_query_cmd, header);
1068 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1069 	if (unlikely(ret != 0))
1070 		return ret;
1071 
1072 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1073 				    &cmd->q.mobid,
1074 				    &vmw_bo);
1075 	if (unlikely(ret != 0))
1076 		return ret;
1077 
1078 	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1079 
1080 	vmw_dmabuf_unreference(&vmw_bo);
1081 	return ret;
1082 }
1083 
1084 /**
1085  * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
1086  *
1087  * @dev_priv: Pointer to a device private struct.
1088  * @sw_context: The software context used for this command submission.
1089  * @header: Pointer to the command header in the command stream.
1090  */
1091 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1092 			     struct vmw_sw_context *sw_context,
1093 			     SVGA3dCmdHeader *header)
1094 {
1095 	struct vmw_dma_buffer *vmw_bo;
1096 	struct vmw_query_cmd {
1097 		SVGA3dCmdHeader header;
1098 		SVGA3dCmdEndQuery q;
1099 	} *cmd;
1100 	int ret;
1101 
1102 	cmd = container_of(header, struct vmw_query_cmd, header);
1103 	if (dev_priv->has_mob) {
1104 		struct {
1105 			SVGA3dCmdHeader header;
1106 			SVGA3dCmdEndGBQuery q;
1107 		} gb_cmd;
1108 
1109 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1110 
1111 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1112 		gb_cmd.header.size = cmd->header.size;
1113 		gb_cmd.q.cid = cmd->q.cid;
1114 		gb_cmd.q.type = cmd->q.type;
1115 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1116 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1117 
1118 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1119 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1120 	}
1121 
1122 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1123 	if (unlikely(ret != 0))
1124 		return ret;
1125 
1126 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1127 				      &cmd->q.guestResult,
1128 				      &vmw_bo);
1129 	if (unlikely(ret != 0))
1130 		return ret;
1131 
1132 	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1133 
1134 	vmw_dmabuf_unreference(&vmw_bo);
1135 	return ret;
1136 }
1137 
1138 /**
1139  * vmw_cmd_wait_gb_query - validate a  SVGA_3D_CMD_WAIT_GB_QUERY command.
1140  *
1141  * @dev_priv: Pointer to a device private struct.
1142  * @sw_context: The software context used for this command submission.
1143  * @header: Pointer to the command header in the command stream.
1144  */
1145 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1146 				 struct vmw_sw_context *sw_context,
1147 				 SVGA3dCmdHeader *header)
1148 {
1149 	struct vmw_dma_buffer *vmw_bo;
1150 	struct vmw_query_cmd {
1151 		SVGA3dCmdHeader header;
1152 		SVGA3dCmdWaitForGBQuery q;
1153 	} *cmd;
1154 	int ret;
1155 
1156 	cmd = container_of(header, struct vmw_query_cmd, header);
1157 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1158 	if (unlikely(ret != 0))
1159 		return ret;
1160 
1161 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1162 				    &cmd->q.mobid,
1163 				    &vmw_bo);
1164 	if (unlikely(ret != 0))
1165 		return ret;
1166 
1167 	vmw_dmabuf_unreference(&vmw_bo);
1168 	return 0;
1169 }
1170 
1171 /**
1172  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
1173  *
1174  * @dev_priv: Pointer to a device private struct.
1175  * @sw_context: The software context used for this command submission.
1176  * @header: Pointer to the command header in the command stream.
1177  */
1178 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1179 			      struct vmw_sw_context *sw_context,
1180 			      SVGA3dCmdHeader *header)
1181 {
1182 	struct vmw_dma_buffer *vmw_bo;
1183 	struct vmw_query_cmd {
1184 		SVGA3dCmdHeader header;
1185 		SVGA3dCmdWaitForQuery q;
1186 	} *cmd;
1187 	int ret;
1188 
1189 	cmd = container_of(header, struct vmw_query_cmd, header);
1190 	if (dev_priv->has_mob) {
1191 		struct {
1192 			SVGA3dCmdHeader header;
1193 			SVGA3dCmdWaitForGBQuery q;
1194 		} gb_cmd;
1195 
1196 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1197 
1198 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1199 		gb_cmd.header.size = cmd->header.size;
1200 		gb_cmd.q.cid = cmd->q.cid;
1201 		gb_cmd.q.type = cmd->q.type;
1202 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1203 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1204 
1205 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1206 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1207 	}
1208 
1209 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1210 	if (unlikely(ret != 0))
1211 		return ret;
1212 
1213 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1214 				      &cmd->q.guestResult,
1215 				      &vmw_bo);
1216 	if (unlikely(ret != 0))
1217 		return ret;
1218 
1219 	vmw_dmabuf_unreference(&vmw_bo);
1220 	return 0;
1221 }
1222 
1223 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1224 		       struct vmw_sw_context *sw_context,
1225 		       SVGA3dCmdHeader *header)
1226 {
1227 	struct vmw_dma_buffer *vmw_bo = NULL;
1228 	struct vmw_surface *srf = NULL;
1229 	struct vmw_dma_cmd {
1230 		SVGA3dCmdHeader header;
1231 		SVGA3dCmdSurfaceDMA dma;
1232 	} *cmd;
1233 	int ret;
1234 	SVGA3dCmdSurfaceDMASuffix *suffix;
1235 	uint32_t bo_size;
1236 
1237 	cmd = container_of(header, struct vmw_dma_cmd, header);
1238 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1239 					       header->size - sizeof(*suffix));
1240 
1241 	/* Make sure device and verifier stays in sync. */
1242 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1243 		DRM_ERROR("Invalid DMA suffix size.\n");
1244 		return -EINVAL;
1245 	}
1246 
1247 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1248 				      &cmd->dma.guest.ptr,
1249 				      &vmw_bo);
1250 	if (unlikely(ret != 0))
1251 		return ret;
1252 
1253 	/* Make sure DMA doesn't cross BO boundaries. */
1254 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1255 	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1256 		DRM_ERROR("Invalid DMA offset.\n");
1257 		return -EINVAL;
1258 	}
1259 
1260 	bo_size -= cmd->dma.guest.ptr.offset;
1261 	if (unlikely(suffix->maximumOffset > bo_size))
1262 		suffix->maximumOffset = bo_size;
1263 
1264 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1265 				user_surface_converter, &cmd->dma.host.sid,
1266 				NULL);
1267 	if (unlikely(ret != 0)) {
1268 		if (unlikely(ret != -ERESTARTSYS))
1269 			DRM_ERROR("could not find surface for DMA.\n");
1270 		goto out_no_surface;
1271 	}
1272 
1273 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1274 
1275 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1276 			     header);
1277 
1278 out_no_surface:
1279 	vmw_dmabuf_unreference(&vmw_bo);
1280 	return ret;
1281 }
1282 
1283 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1284 			struct vmw_sw_context *sw_context,
1285 			SVGA3dCmdHeader *header)
1286 {
1287 	struct vmw_draw_cmd {
1288 		SVGA3dCmdHeader header;
1289 		SVGA3dCmdDrawPrimitives body;
1290 	} *cmd;
1291 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1292 		(unsigned long)header + sizeof(*cmd));
1293 	SVGA3dPrimitiveRange *range;
1294 	uint32_t i;
1295 	uint32_t maxnum;
1296 	int ret;
1297 
1298 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1299 	if (unlikely(ret != 0))
1300 		return ret;
1301 
1302 	cmd = container_of(header, struct vmw_draw_cmd, header);
1303 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1304 
1305 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1306 		DRM_ERROR("Illegal number of vertex declarations.\n");
1307 		return -EINVAL;
1308 	}
1309 
1310 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1311 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1312 					user_surface_converter,
1313 					&decl->array.surfaceId, NULL);
1314 		if (unlikely(ret != 0))
1315 			return ret;
1316 	}
1317 
1318 	maxnum = (header->size - sizeof(cmd->body) -
1319 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1320 	if (unlikely(cmd->body.numRanges > maxnum)) {
1321 		DRM_ERROR("Illegal number of index ranges.\n");
1322 		return -EINVAL;
1323 	}
1324 
1325 	range = (SVGA3dPrimitiveRange *) decl;
1326 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1327 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1328 					user_surface_converter,
1329 					&range->indexArray.surfaceId, NULL);
1330 		if (unlikely(ret != 0))
1331 			return ret;
1332 	}
1333 	return 0;
1334 }
1335 
1336 
1337 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1338 			     struct vmw_sw_context *sw_context,
1339 			     SVGA3dCmdHeader *header)
1340 {
1341 	struct vmw_tex_state_cmd {
1342 		SVGA3dCmdHeader header;
1343 		SVGA3dCmdSetTextureState state;
1344 	} *cmd;
1345 
1346 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1347 	  ((unsigned long) header + header->size + sizeof(header));
1348 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1349 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1350 	struct vmw_resource_val_node *ctx_node;
1351 	struct vmw_resource_val_node *res_node;
1352 	int ret;
1353 
1354 	cmd = container_of(header, struct vmw_tex_state_cmd,
1355 			   header);
1356 
1357 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1358 				user_context_converter, &cmd->state.cid,
1359 				&ctx_node);
1360 	if (unlikely(ret != 0))
1361 		return ret;
1362 
1363 	for (; cur_state < last_state; ++cur_state) {
1364 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1365 			continue;
1366 
1367 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1368 					user_surface_converter,
1369 					&cur_state->value, &res_node);
1370 		if (unlikely(ret != 0))
1371 			return ret;
1372 
1373 		if (dev_priv->has_mob) {
1374 			struct vmw_ctx_bindinfo bi;
1375 
1376 			bi.ctx = ctx_node->res;
1377 			bi.res = res_node ? res_node->res : NULL;
1378 			bi.bt = vmw_ctx_binding_tex;
1379 			bi.i1.texture_stage = cur_state->stage;
1380 			vmw_context_binding_add(ctx_node->staged_bindings,
1381 						&bi);
1382 		}
1383 	}
1384 
1385 	return 0;
1386 }
1387 
1388 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1389 				      struct vmw_sw_context *sw_context,
1390 				      void *buf)
1391 {
1392 	struct vmw_dma_buffer *vmw_bo;
1393 	int ret;
1394 
1395 	struct {
1396 		uint32_t header;
1397 		SVGAFifoCmdDefineGMRFB body;
1398 	} *cmd = buf;
1399 
1400 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1401 				      &cmd->body.ptr,
1402 				      &vmw_bo);
1403 	if (unlikely(ret != 0))
1404 		return ret;
1405 
1406 	vmw_dmabuf_unreference(&vmw_bo);
1407 
1408 	return ret;
1409 }
1410 
1411 /**
1412  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1413  *
1414  * @dev_priv: Pointer to a device private struct.
1415  * @sw_context: The software context being used for this batch.
1416  * @res_type: The resource type.
1417  * @converter: Information about user-space binding for this resource type.
1418  * @res_id: Pointer to the user-space resource handle in the command stream.
1419  * @buf_id: Pointer to the user-space backup buffer handle in the command
1420  * stream.
1421  * @backup_offset: Offset of backup into MOB.
1422  *
1423  * This function prepares for registering a switch of backup buffers
1424  * in the resource metadata just prior to unreserving.
1425  */
1426 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1427 				 struct vmw_sw_context *sw_context,
1428 				 enum vmw_res_type res_type,
1429 				 const struct vmw_user_resource_conv
1430 				 *converter,
1431 				 uint32_t *res_id,
1432 				 uint32_t *buf_id,
1433 				 unsigned long backup_offset)
1434 {
1435 	int ret;
1436 	struct vmw_dma_buffer *dma_buf;
1437 	struct vmw_resource_val_node *val_node;
1438 
1439 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1440 				converter, res_id, &val_node);
1441 	if (unlikely(ret != 0))
1442 		return ret;
1443 
1444 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1445 	if (unlikely(ret != 0))
1446 		return ret;
1447 
1448 	if (val_node->first_usage)
1449 		val_node->no_buffer_needed = true;
1450 
1451 	vmw_dmabuf_unreference(&val_node->new_backup);
1452 	val_node->new_backup = dma_buf;
1453 	val_node->new_backup_offset = backup_offset;
1454 
1455 	return 0;
1456 }
1457 
1458 /**
1459  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1460  * command
1461  *
1462  * @dev_priv: Pointer to a device private struct.
1463  * @sw_context: The software context being used for this batch.
1464  * @header: Pointer to the command header in the command stream.
1465  */
1466 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1467 				   struct vmw_sw_context *sw_context,
1468 				   SVGA3dCmdHeader *header)
1469 {
1470 	struct vmw_bind_gb_surface_cmd {
1471 		SVGA3dCmdHeader header;
1472 		SVGA3dCmdBindGBSurface body;
1473 	} *cmd;
1474 
1475 	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1476 
1477 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1478 				     user_surface_converter,
1479 				     &cmd->body.sid, &cmd->body.mobid,
1480 				     0);
1481 }
1482 
1483 /**
1484  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1485  * command
1486  *
1487  * @dev_priv: Pointer to a device private struct.
1488  * @sw_context: The software context being used for this batch.
1489  * @header: Pointer to the command header in the command stream.
1490  */
1491 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1492 				   struct vmw_sw_context *sw_context,
1493 				   SVGA3dCmdHeader *header)
1494 {
1495 	struct vmw_gb_surface_cmd {
1496 		SVGA3dCmdHeader header;
1497 		SVGA3dCmdUpdateGBImage body;
1498 	} *cmd;
1499 
1500 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1501 
1502 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1503 				 user_surface_converter,
1504 				 &cmd->body.image.sid, NULL);
1505 }
1506 
1507 /**
1508  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1509  * command
1510  *
1511  * @dev_priv: Pointer to a device private struct.
1512  * @sw_context: The software context being used for this batch.
1513  * @header: Pointer to the command header in the command stream.
1514  */
1515 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1516 				     struct vmw_sw_context *sw_context,
1517 				     SVGA3dCmdHeader *header)
1518 {
1519 	struct vmw_gb_surface_cmd {
1520 		SVGA3dCmdHeader header;
1521 		SVGA3dCmdUpdateGBSurface body;
1522 	} *cmd;
1523 
1524 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1525 
1526 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1527 				 user_surface_converter,
1528 				 &cmd->body.sid, NULL);
1529 }
1530 
1531 /**
1532  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1533  * command
1534  *
1535  * @dev_priv: Pointer to a device private struct.
1536  * @sw_context: The software context being used for this batch.
1537  * @header: Pointer to the command header in the command stream.
1538  */
1539 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1540 				     struct vmw_sw_context *sw_context,
1541 				     SVGA3dCmdHeader *header)
1542 {
1543 	struct vmw_gb_surface_cmd {
1544 		SVGA3dCmdHeader header;
1545 		SVGA3dCmdReadbackGBImage body;
1546 	} *cmd;
1547 
1548 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1549 
1550 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1551 				 user_surface_converter,
1552 				 &cmd->body.image.sid, NULL);
1553 }
1554 
1555 /**
1556  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1557  * command
1558  *
1559  * @dev_priv: Pointer to a device private struct.
1560  * @sw_context: The software context being used for this batch.
1561  * @header: Pointer to the command header in the command stream.
1562  */
1563 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1564 				       struct vmw_sw_context *sw_context,
1565 				       SVGA3dCmdHeader *header)
1566 {
1567 	struct vmw_gb_surface_cmd {
1568 		SVGA3dCmdHeader header;
1569 		SVGA3dCmdReadbackGBSurface body;
1570 	} *cmd;
1571 
1572 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1573 
1574 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1575 				 user_surface_converter,
1576 				 &cmd->body.sid, NULL);
1577 }
1578 
1579 /**
1580  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1581  * command
1582  *
1583  * @dev_priv: Pointer to a device private struct.
1584  * @sw_context: The software context being used for this batch.
1585  * @header: Pointer to the command header in the command stream.
1586  */
1587 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1588 				       struct vmw_sw_context *sw_context,
1589 				       SVGA3dCmdHeader *header)
1590 {
1591 	struct vmw_gb_surface_cmd {
1592 		SVGA3dCmdHeader header;
1593 		SVGA3dCmdInvalidateGBImage body;
1594 	} *cmd;
1595 
1596 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1597 
1598 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1599 				 user_surface_converter,
1600 				 &cmd->body.image.sid, NULL);
1601 }
1602 
1603 /**
1604  * vmw_cmd_invalidate_gb_surface - Validate an
1605  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1606  *
1607  * @dev_priv: Pointer to a device private struct.
1608  * @sw_context: The software context being used for this batch.
1609  * @header: Pointer to the command header in the command stream.
1610  */
1611 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1612 					 struct vmw_sw_context *sw_context,
1613 					 SVGA3dCmdHeader *header)
1614 {
1615 	struct vmw_gb_surface_cmd {
1616 		SVGA3dCmdHeader header;
1617 		SVGA3dCmdInvalidateGBSurface body;
1618 	} *cmd;
1619 
1620 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1621 
1622 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1623 				 user_surface_converter,
1624 				 &cmd->body.sid, NULL);
1625 }
1626 
1627 
1628 /**
1629  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1630  * command
1631  *
1632  * @dev_priv: Pointer to a device private struct.
1633  * @sw_context: The software context being used for this batch.
1634  * @header: Pointer to the command header in the command stream.
1635  */
1636 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1637 				 struct vmw_sw_context *sw_context,
1638 				 SVGA3dCmdHeader *header)
1639 {
1640 	struct vmw_shader_define_cmd {
1641 		SVGA3dCmdHeader header;
1642 		SVGA3dCmdDefineShader body;
1643 	} *cmd;
1644 	int ret;
1645 	size_t size;
1646 	struct vmw_resource_val_node *val;
1647 
1648 	cmd = container_of(header, struct vmw_shader_define_cmd,
1649 			   header);
1650 
1651 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1652 				user_context_converter, &cmd->body.cid,
1653 				&val);
1654 	if (unlikely(ret != 0))
1655 		return ret;
1656 
1657 	if (unlikely(!dev_priv->has_mob))
1658 		return 0;
1659 
1660 	size = cmd->header.size - sizeof(cmd->body);
1661 	ret = vmw_compat_shader_add(dev_priv,
1662 				    vmw_context_res_man(val->res),
1663 				    cmd->body.shid, cmd + 1,
1664 				    cmd->body.type, size,
1665 				    &sw_context->staged_cmd_res);
1666 	if (unlikely(ret != 0))
1667 		return ret;
1668 
1669 	return vmw_resource_relocation_add(&sw_context->res_relocations,
1670 					   NULL, &cmd->header.id -
1671 					   sw_context->buf_start);
1672 
1673 	return 0;
1674 }
1675 
1676 /**
1677  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1678  * command
1679  *
1680  * @dev_priv: Pointer to a device private struct.
1681  * @sw_context: The software context being used for this batch.
1682  * @header: Pointer to the command header in the command stream.
1683  */
1684 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1685 				  struct vmw_sw_context *sw_context,
1686 				  SVGA3dCmdHeader *header)
1687 {
1688 	struct vmw_shader_destroy_cmd {
1689 		SVGA3dCmdHeader header;
1690 		SVGA3dCmdDestroyShader body;
1691 	} *cmd;
1692 	int ret;
1693 	struct vmw_resource_val_node *val;
1694 
1695 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
1696 			   header);
1697 
1698 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1699 				user_context_converter, &cmd->body.cid,
1700 				&val);
1701 	if (unlikely(ret != 0))
1702 		return ret;
1703 
1704 	if (unlikely(!dev_priv->has_mob))
1705 		return 0;
1706 
1707 	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1708 				       cmd->body.shid,
1709 				       cmd->body.type,
1710 				       &sw_context->staged_cmd_res);
1711 	if (unlikely(ret != 0))
1712 		return ret;
1713 
1714 	return vmw_resource_relocation_add(&sw_context->res_relocations,
1715 					   NULL, &cmd->header.id -
1716 					   sw_context->buf_start);
1717 
1718 	return 0;
1719 }
1720 
1721 /**
1722  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1723  * command
1724  *
1725  * @dev_priv: Pointer to a device private struct.
1726  * @sw_context: The software context being used for this batch.
1727  * @header: Pointer to the command header in the command stream.
1728  */
1729 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1730 			      struct vmw_sw_context *sw_context,
1731 			      SVGA3dCmdHeader *header)
1732 {
1733 	struct vmw_set_shader_cmd {
1734 		SVGA3dCmdHeader header;
1735 		SVGA3dCmdSetShader body;
1736 	} *cmd;
1737 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1738 	struct vmw_ctx_bindinfo bi;
1739 	struct vmw_resource *res = NULL;
1740 	int ret;
1741 
1742 	cmd = container_of(header, struct vmw_set_shader_cmd,
1743 			   header);
1744 
1745 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1746 				user_context_converter, &cmd->body.cid,
1747 				&ctx_node);
1748 	if (unlikely(ret != 0))
1749 		return ret;
1750 
1751 	if (!dev_priv->has_mob)
1752 		return 0;
1753 
1754 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
1755 		res = vmw_compat_shader_lookup
1756 			(vmw_context_res_man(ctx_node->res),
1757 			 cmd->body.shid,
1758 			 cmd->body.type);
1759 
1760 		if (!IS_ERR(res)) {
1761 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1762 						    vmw_res_shader,
1763 						    &cmd->body.shid, res,
1764 						    &res_node);
1765 			vmw_resource_unreference(&res);
1766 			if (unlikely(ret != 0))
1767 				return ret;
1768 		}
1769 	}
1770 
1771 	if (!res_node) {
1772 		ret = vmw_cmd_res_check(dev_priv, sw_context,
1773 					vmw_res_shader,
1774 					user_shader_converter,
1775 					&cmd->body.shid, &res_node);
1776 		if (unlikely(ret != 0))
1777 			return ret;
1778 	}
1779 
1780 	bi.ctx = ctx_node->res;
1781 	bi.res = res_node ? res_node->res : NULL;
1782 	bi.bt = vmw_ctx_binding_shader;
1783 	bi.i1.shader_type = cmd->body.type;
1784 	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1785 }
1786 
1787 /**
1788  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1789  * command
1790  *
1791  * @dev_priv: Pointer to a device private struct.
1792  * @sw_context: The software context being used for this batch.
1793  * @header: Pointer to the command header in the command stream.
1794  */
1795 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1796 				    struct vmw_sw_context *sw_context,
1797 				    SVGA3dCmdHeader *header)
1798 {
1799 	struct vmw_set_shader_const_cmd {
1800 		SVGA3dCmdHeader header;
1801 		SVGA3dCmdSetShaderConst body;
1802 	} *cmd;
1803 	int ret;
1804 
1805 	cmd = container_of(header, struct vmw_set_shader_const_cmd,
1806 			   header);
1807 
1808 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1809 				user_context_converter, &cmd->body.cid,
1810 				NULL);
1811 	if (unlikely(ret != 0))
1812 		return ret;
1813 
1814 	if (dev_priv->has_mob)
1815 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1816 
1817 	return 0;
1818 }
1819 
1820 /**
1821  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1822  * command
1823  *
1824  * @dev_priv: Pointer to a device private struct.
1825  * @sw_context: The software context being used for this batch.
1826  * @header: Pointer to the command header in the command stream.
1827  */
1828 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1829 				  struct vmw_sw_context *sw_context,
1830 				  SVGA3dCmdHeader *header)
1831 {
1832 	struct vmw_bind_gb_shader_cmd {
1833 		SVGA3dCmdHeader header;
1834 		SVGA3dCmdBindGBShader body;
1835 	} *cmd;
1836 
1837 	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1838 			   header);
1839 
1840 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1841 				     user_shader_converter,
1842 				     &cmd->body.shid, &cmd->body.mobid,
1843 				     cmd->body.offsetInBytes);
1844 }
1845 
1846 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1847 				struct vmw_sw_context *sw_context,
1848 				void *buf, uint32_t *size)
1849 {
1850 	uint32_t size_remaining = *size;
1851 	uint32_t cmd_id;
1852 
1853 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1854 	switch (cmd_id) {
1855 	case SVGA_CMD_UPDATE:
1856 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1857 		break;
1858 	case SVGA_CMD_DEFINE_GMRFB:
1859 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1860 		break;
1861 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1862 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1863 		break;
1864 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1865 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1866 		break;
1867 	default:
1868 		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1869 		return -EINVAL;
1870 	}
1871 
1872 	if (*size > size_remaining) {
1873 		DRM_ERROR("Invalid SVGA command (size mismatch):"
1874 			  " %u.\n", cmd_id);
1875 		return -EINVAL;
1876 	}
1877 
1878 	if (unlikely(!sw_context->kernel)) {
1879 		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1880 		return -EPERM;
1881 	}
1882 
1883 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1884 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1885 
1886 	return 0;
1887 }
1888 
1889 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1890 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1891 		    false, false, false),
1892 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1893 		    false, false, false),
1894 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1895 		    true, false, false),
1896 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1897 		    true, false, false),
1898 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1899 		    true, false, false),
1900 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1901 		    false, false, false),
1902 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1903 		    false, false, false),
1904 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1905 		    true, false, false),
1906 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1907 		    true, false, false),
1908 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1909 		    true, false, false),
1910 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1911 		    &vmw_cmd_set_render_target_check, true, false, false),
1912 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1913 		    true, false, false),
1914 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1915 		    true, false, false),
1916 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1917 		    true, false, false),
1918 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1919 		    true, false, false),
1920 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1921 		    true, false, false),
1922 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1923 		    true, false, false),
1924 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1925 		    true, false, false),
1926 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1927 		    false, false, false),
1928 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1929 		    true, false, false),
1930 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1931 		    true, false, false),
1932 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1933 		    true, false, false),
1934 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1935 		    true, false, false),
1936 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1937 		    true, false, false),
1938 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1939 		    true, false, false),
1940 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1941 		    true, false, false),
1942 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1943 		    true, false, false),
1944 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1945 		    true, false, false),
1946 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1947 		    true, false, false),
1948 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1949 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
1950 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1951 		    false, false, false),
1952 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1953 		    false, false, false),
1954 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1955 		    false, false, false),
1956 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1957 		    false, false, false),
1958 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1959 		    false, false, false),
1960 	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1961 		    false, false, false),
1962 	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1963 		    false, false, false),
1964 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1965 		    false, false, false),
1966 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1967 		    false, false, false),
1968 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1969 		    false, false, false),
1970 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1971 		    false, false, false),
1972 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1973 		    false, false, false),
1974 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1975 		    false, false, false),
1976 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1977 		    false, false, true),
1978 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1979 		    false, false, true),
1980 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1981 		    false, false, true),
1982 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1983 		    false, false, true),
1984 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1985 		    false, false, true),
1986 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1987 		    false, false, true),
1988 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1989 		    false, false, true),
1990 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1991 		    false, false, true),
1992 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1993 		    true, false, true),
1994 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1995 		    false, false, true),
1996 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1997 		    true, false, true),
1998 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1999 		    &vmw_cmd_update_gb_surface, true, false, true),
2000 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2001 		    &vmw_cmd_readback_gb_image, true, false, true),
2002 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2003 		    &vmw_cmd_readback_gb_surface, true, false, true),
2004 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2005 		    &vmw_cmd_invalidate_gb_image, true, false, true),
2006 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2007 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
2008 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2009 		    false, false, true),
2010 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2011 		    false, false, true),
2012 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2013 		    false, false, true),
2014 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2015 		    false, false, true),
2016 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2017 		    false, false, true),
2018 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2019 		    false, false, true),
2020 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2021 		    true, false, true),
2022 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2023 		    false, false, true),
2024 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2025 		    false, false, false),
2026 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2027 		    true, false, true),
2028 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2029 		    true, false, true),
2030 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2031 		    true, false, true),
2032 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2033 		    true, false, true),
2034 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2035 		    false, false, true),
2036 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2037 		    false, false, true),
2038 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2039 		    false, false, true),
2040 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2041 		    false, false, true),
2042 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2043 		    false, false, true),
2044 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2045 		    false, false, true),
2046 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2047 		    false, false, true),
2048 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2049 		    false, false, true),
2050 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2051 		    false, false, true),
2052 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2053 		    false, false, true),
2054 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2055 		    true, false, true)
2056 };
2057 
2058 static int vmw_cmd_check(struct vmw_private *dev_priv,
2059 			 struct vmw_sw_context *sw_context,
2060 			 void *buf, uint32_t *size)
2061 {
2062 	uint32_t cmd_id;
2063 	uint32_t size_remaining = *size;
2064 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2065 	int ret;
2066 	const struct vmw_cmd_entry *entry;
2067 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2068 
2069 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2070 	/* Handle any none 3D commands */
2071 	if (unlikely(cmd_id < SVGA_CMD_MAX))
2072 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2073 
2074 
2075 	cmd_id = le32_to_cpu(header->id);
2076 	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2077 
2078 	cmd_id -= SVGA_3D_CMD_BASE;
2079 	if (unlikely(*size > size_remaining))
2080 		goto out_invalid;
2081 
2082 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2083 		goto out_invalid;
2084 
2085 	entry = &vmw_cmd_entries[cmd_id];
2086 	if (unlikely(!entry->func))
2087 		goto out_invalid;
2088 
2089 	if (unlikely(!entry->user_allow && !sw_context->kernel))
2090 		goto out_privileged;
2091 
2092 	if (unlikely(entry->gb_disable && gb))
2093 		goto out_old;
2094 
2095 	if (unlikely(entry->gb_enable && !gb))
2096 		goto out_new;
2097 
2098 	ret = entry->func(dev_priv, sw_context, header);
2099 	if (unlikely(ret != 0))
2100 		goto out_invalid;
2101 
2102 	return 0;
2103 out_invalid:
2104 	DRM_ERROR("Invalid SVGA3D command: %d\n",
2105 		  cmd_id + SVGA_3D_CMD_BASE);
2106 	return -EINVAL;
2107 out_privileged:
2108 	DRM_ERROR("Privileged SVGA3D command: %d\n",
2109 		  cmd_id + SVGA_3D_CMD_BASE);
2110 	return -EPERM;
2111 out_old:
2112 	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2113 		  cmd_id + SVGA_3D_CMD_BASE);
2114 	return -EINVAL;
2115 out_new:
2116 	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2117 		  cmd_id + SVGA_3D_CMD_BASE);
2118 	return -EINVAL;
2119 }
2120 
2121 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2122 			     struct vmw_sw_context *sw_context,
2123 			     void *buf,
2124 			     uint32_t size)
2125 {
2126 	int32_t cur_size = size;
2127 	int ret;
2128 
2129 	sw_context->buf_start = buf;
2130 
2131 	while (cur_size > 0) {
2132 		size = cur_size;
2133 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2134 		if (unlikely(ret != 0))
2135 			return ret;
2136 		buf = (void *)((unsigned long) buf + size);
2137 		cur_size -= size;
2138 	}
2139 
2140 	if (unlikely(cur_size != 0)) {
2141 		DRM_ERROR("Command verifier out of sync.\n");
2142 		return -EINVAL;
2143 	}
2144 
2145 	return 0;
2146 }
2147 
2148 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2149 {
2150 	sw_context->cur_reloc = 0;
2151 }
2152 
2153 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2154 {
2155 	uint32_t i;
2156 	struct vmw_relocation *reloc;
2157 	struct ttm_validate_buffer *validate;
2158 	struct ttm_buffer_object *bo;
2159 
2160 	for (i = 0; i < sw_context->cur_reloc; ++i) {
2161 		reloc = &sw_context->relocs[i];
2162 		validate = &sw_context->val_bufs[reloc->index].base;
2163 		bo = validate->bo;
2164 		switch (bo->mem.mem_type) {
2165 		case TTM_PL_VRAM:
2166 			reloc->location->offset += bo->offset;
2167 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2168 			break;
2169 		case VMW_PL_GMR:
2170 			reloc->location->gmrId = bo->mem.start;
2171 			break;
2172 		case VMW_PL_MOB:
2173 			*reloc->mob_loc = bo->mem.start;
2174 			break;
2175 		default:
2176 			BUG();
2177 		}
2178 	}
2179 	vmw_free_relocations(sw_context);
2180 }
2181 
2182 /**
2183  * vmw_resource_list_unrefererence - Free up a resource list and unreference
2184  * all resources referenced by it.
2185  *
2186  * @list: The resource list.
2187  */
2188 static void vmw_resource_list_unreference(struct list_head *list)
2189 {
2190 	struct vmw_resource_val_node *val, *val_next;
2191 
2192 	/*
2193 	 * Drop references to resources held during command submission.
2194 	 */
2195 
2196 	list_for_each_entry_safe(val, val_next, list, head) {
2197 		list_del_init(&val->head);
2198 		vmw_resource_unreference(&val->res);
2199 		if (unlikely(val->staged_bindings))
2200 			kfree(val->staged_bindings);
2201 		kfree(val);
2202 	}
2203 }
2204 
2205 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2206 {
2207 	struct vmw_validate_buffer *entry, *next;
2208 	struct vmw_resource_val_node *val;
2209 
2210 	/*
2211 	 * Drop references to DMA buffers held during command submission.
2212 	 */
2213 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2214 				 base.head) {
2215 		list_del(&entry->base.head);
2216 		ttm_bo_unref(&entry->base.bo);
2217 		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2218 		sw_context->cur_val_buf--;
2219 	}
2220 	BUG_ON(sw_context->cur_val_buf != 0);
2221 
2222 	list_for_each_entry(val, &sw_context->resource_list, head)
2223 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2224 }
2225 
2226 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2227 				      struct ttm_buffer_object *bo,
2228 				      bool validate_as_mob)
2229 {
2230 	int ret;
2231 
2232 
2233 	/*
2234 	 * Don't validate pinned buffers.
2235 	 */
2236 
2237 	if (bo == dev_priv->pinned_bo ||
2238 	    (bo == dev_priv->dummy_query_bo &&
2239 	     dev_priv->dummy_query_bo_pinned))
2240 		return 0;
2241 
2242 	if (validate_as_mob)
2243 		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2244 
2245 	/**
2246 	 * Put BO in VRAM if there is space, otherwise as a GMR.
2247 	 * If there is no space in VRAM and GMR ids are all used up,
2248 	 * start evicting GMRs to make room. If the DMA buffer can't be
2249 	 * used as a GMR, this will return -ENOMEM.
2250 	 */
2251 
2252 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2253 	if (likely(ret == 0 || ret == -ERESTARTSYS))
2254 		return ret;
2255 
2256 	/**
2257 	 * If that failed, try VRAM again, this time evicting
2258 	 * previous contents.
2259 	 */
2260 
2261 	DRM_INFO("Falling through to VRAM.\n");
2262 	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2263 	return ret;
2264 }
2265 
2266 static int vmw_validate_buffers(struct vmw_private *dev_priv,
2267 				struct vmw_sw_context *sw_context)
2268 {
2269 	struct vmw_validate_buffer *entry;
2270 	int ret;
2271 
2272 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2273 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2274 						 entry->validate_as_mob);
2275 		if (unlikely(ret != 0))
2276 			return ret;
2277 	}
2278 	return 0;
2279 }
2280 
2281 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2282 				 uint32_t size)
2283 {
2284 	if (likely(sw_context->cmd_bounce_size >= size))
2285 		return 0;
2286 
2287 	if (sw_context->cmd_bounce_size == 0)
2288 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2289 
2290 	while (sw_context->cmd_bounce_size < size) {
2291 		sw_context->cmd_bounce_size =
2292 			PAGE_ALIGN(sw_context->cmd_bounce_size +
2293 				   (sw_context->cmd_bounce_size >> 1));
2294 	}
2295 
2296 	if (sw_context->cmd_bounce != NULL)
2297 		vfree(sw_context->cmd_bounce);
2298 
2299 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2300 
2301 	if (sw_context->cmd_bounce == NULL) {
2302 		DRM_ERROR("Failed to allocate command bounce buffer.\n");
2303 		sw_context->cmd_bounce_size = 0;
2304 		return -ENOMEM;
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 /**
2311  * vmw_execbuf_fence_commands - create and submit a command stream fence
2312  *
2313  * Creates a fence object and submits a command stream marker.
2314  * If this fails for some reason, We sync the fifo and return NULL.
2315  * It is then safe to fence buffers with a NULL pointer.
2316  *
2317  * If @p_handle is not NULL @file_priv must also not be NULL. Creates
2318  * a userspace handle if @p_handle is not NULL, otherwise not.
2319  */
2320 
2321 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2322 			       struct vmw_private *dev_priv,
2323 			       struct vmw_fence_obj **p_fence,
2324 			       uint32_t *p_handle)
2325 {
2326 	uint32_t sequence;
2327 	int ret;
2328 	bool synced = false;
2329 
2330 	/* p_handle implies file_priv. */
2331 	BUG_ON(p_handle != NULL && file_priv == NULL);
2332 
2333 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
2334 	if (unlikely(ret != 0)) {
2335 		DRM_ERROR("Fence submission error. Syncing.\n");
2336 		synced = true;
2337 	}
2338 
2339 	if (p_handle != NULL)
2340 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2341 					    sequence,
2342 					    DRM_VMW_FENCE_FLAG_EXEC,
2343 					    p_fence, p_handle);
2344 	else
2345 		ret = vmw_fence_create(dev_priv->fman, sequence,
2346 				       DRM_VMW_FENCE_FLAG_EXEC,
2347 				       p_fence);
2348 
2349 	if (unlikely(ret != 0 && !synced)) {
2350 		(void) vmw_fallback_wait(dev_priv, false, false,
2351 					 sequence, false,
2352 					 VMW_FENCE_WAIT_TIMEOUT);
2353 		*p_fence = NULL;
2354 	}
2355 
2356 	return 0;
2357 }
2358 
2359 /**
2360  * vmw_execbuf_copy_fence_user - copy fence object information to
2361  * user-space.
2362  *
2363  * @dev_priv: Pointer to a vmw_private struct.
2364  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2365  * @ret: Return value from fence object creation.
2366  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2367  * which the information should be copied.
2368  * @fence: Pointer to the fenc object.
2369  * @fence_handle: User-space fence handle.
2370  *
2371  * This function copies fence information to user-space. If copying fails,
2372  * The user-space struct drm_vmw_fence_rep::error member is hopefully
2373  * left untouched, and if it's preloaded with an -EFAULT by user-space,
2374  * the error will hopefully be detected.
2375  * Also if copying fails, user-space will be unable to signal the fence
2376  * object so we wait for it immediately, and then unreference the
2377  * user-space reference.
2378  */
2379 void
2380 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2381 			    struct vmw_fpriv *vmw_fp,
2382 			    int ret,
2383 			    struct drm_vmw_fence_rep __user *user_fence_rep,
2384 			    struct vmw_fence_obj *fence,
2385 			    uint32_t fence_handle)
2386 {
2387 	struct drm_vmw_fence_rep fence_rep;
2388 
2389 	if (user_fence_rep == NULL)
2390 		return;
2391 
2392 	memset(&fence_rep, 0, sizeof(fence_rep));
2393 
2394 	fence_rep.error = ret;
2395 	if (ret == 0) {
2396 		BUG_ON(fence == NULL);
2397 
2398 		fence_rep.handle = fence_handle;
2399 		fence_rep.seqno = fence->seqno;
2400 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
2401 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
2402 	}
2403 
2404 	/*
2405 	 * copy_to_user errors will be detected by user space not
2406 	 * seeing fence_rep::error filled in. Typically
2407 	 * user-space would have pre-set that member to -EFAULT.
2408 	 */
2409 	ret = copy_to_user(user_fence_rep, &fence_rep,
2410 			   sizeof(fence_rep));
2411 
2412 	/*
2413 	 * User-space lost the fence object. We need to sync
2414 	 * and unreference the handle.
2415 	 */
2416 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2417 		ttm_ref_object_base_unref(vmw_fp->tfile,
2418 					  fence_handle, TTM_REF_USAGE);
2419 		DRM_ERROR("Fence copy error. Syncing.\n");
2420 		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
2421 					  false, false,
2422 					  VMW_FENCE_WAIT_TIMEOUT);
2423 	}
2424 }
2425 
2426 
2427 
2428 int vmw_execbuf_process(struct drm_file *file_priv,
2429 			struct vmw_private *dev_priv,
2430 			void __user *user_commands,
2431 			void *kernel_commands,
2432 			uint32_t command_size,
2433 			uint64_t throttle_us,
2434 			struct drm_vmw_fence_rep __user *user_fence_rep,
2435 			struct vmw_fence_obj **out_fence)
2436 {
2437 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
2438 	struct vmw_fence_obj *fence = NULL;
2439 	struct vmw_resource *error_resource;
2440 	struct list_head resource_list;
2441 	struct ww_acquire_ctx ticket;
2442 	uint32_t handle;
2443 	void *cmd;
2444 	int ret;
2445 
2446 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2447 	if (unlikely(ret != 0))
2448 		return -ERESTARTSYS;
2449 
2450 	if (kernel_commands == NULL) {
2451 		sw_context->kernel = false;
2452 
2453 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
2454 		if (unlikely(ret != 0))
2455 			goto out_unlock;
2456 
2457 
2458 		ret = copy_from_user(sw_context->cmd_bounce,
2459 				     user_commands, command_size);
2460 
2461 		if (unlikely(ret != 0)) {
2462 			ret = -EFAULT;
2463 			DRM_ERROR("Failed copying commands.\n");
2464 			goto out_unlock;
2465 		}
2466 		kernel_commands = sw_context->cmd_bounce;
2467 	} else
2468 		sw_context->kernel = true;
2469 
2470 	sw_context->fp = vmw_fpriv(file_priv);
2471 	sw_context->cur_reloc = 0;
2472 	sw_context->cur_val_buf = 0;
2473 	sw_context->fence_flags = 0;
2474 	INIT_LIST_HEAD(&sw_context->resource_list);
2475 	sw_context->cur_query_bo = dev_priv->pinned_bo;
2476 	sw_context->last_query_ctx = NULL;
2477 	sw_context->needs_post_query_barrier = false;
2478 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2479 	INIT_LIST_HEAD(&sw_context->validate_nodes);
2480 	INIT_LIST_HEAD(&sw_context->res_relocations);
2481 	if (!sw_context->res_ht_initialized) {
2482 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2483 		if (unlikely(ret != 0))
2484 			goto out_unlock;
2485 		sw_context->res_ht_initialized = true;
2486 	}
2487 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2488 
2489 	INIT_LIST_HEAD(&resource_list);
2490 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2491 				command_size);
2492 	if (unlikely(ret != 0))
2493 		goto out_err_nores;
2494 
2495 	ret = vmw_resources_reserve(sw_context);
2496 	if (unlikely(ret != 0))
2497 		goto out_err_nores;
2498 
2499 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2500 	if (unlikely(ret != 0))
2501 		goto out_err;
2502 
2503 	ret = vmw_validate_buffers(dev_priv, sw_context);
2504 	if (unlikely(ret != 0))
2505 		goto out_err;
2506 
2507 	ret = vmw_resources_validate(sw_context);
2508 	if (unlikely(ret != 0))
2509 		goto out_err;
2510 
2511 	if (throttle_us) {
2512 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2513 				   throttle_us);
2514 
2515 		if (unlikely(ret != 0))
2516 			goto out_err;
2517 	}
2518 
2519 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2520 	if (unlikely(ret != 0)) {
2521 		ret = -ERESTARTSYS;
2522 		goto out_err;
2523 	}
2524 
2525 	if (dev_priv->has_mob) {
2526 		ret = vmw_rebind_contexts(sw_context);
2527 		if (unlikely(ret != 0))
2528 			goto out_unlock_binding;
2529 	}
2530 
2531 	cmd = vmw_fifo_reserve(dev_priv, command_size);
2532 	if (unlikely(cmd == NULL)) {
2533 		DRM_ERROR("Failed reserving fifo space for commands.\n");
2534 		ret = -ENOMEM;
2535 		goto out_unlock_binding;
2536 	}
2537 
2538 	vmw_apply_relocations(sw_context);
2539 	memcpy(cmd, kernel_commands, command_size);
2540 
2541 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2542 	vmw_resource_relocations_free(&sw_context->res_relocations);
2543 
2544 	vmw_fifo_commit(dev_priv, command_size);
2545 
2546 	vmw_query_bo_switch_commit(dev_priv, sw_context);
2547 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2548 					 &fence,
2549 					 (user_fence_rep) ? &handle : NULL);
2550 	/*
2551 	 * This error is harmless, because if fence submission fails,
2552 	 * vmw_fifo_send_fence will sync. The error will be propagated to
2553 	 * user-space in @fence_rep
2554 	 */
2555 
2556 	if (ret != 0)
2557 		DRM_ERROR("Fence submission error. Syncing.\n");
2558 
2559 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
2560 	mutex_unlock(&dev_priv->binding_mutex);
2561 
2562 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2563 				    (void *) fence);
2564 
2565 	if (unlikely(dev_priv->pinned_bo != NULL &&
2566 		     !dev_priv->query_cid_valid))
2567 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
2568 
2569 	vmw_clear_validations(sw_context);
2570 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2571 				    user_fence_rep, fence, handle);
2572 
2573 	/* Don't unreference when handing fence out */
2574 	if (unlikely(out_fence != NULL)) {
2575 		*out_fence = fence;
2576 		fence = NULL;
2577 	} else if (likely(fence != NULL)) {
2578 		vmw_fence_obj_unreference(&fence);
2579 	}
2580 
2581 	list_splice_init(&sw_context->resource_list, &resource_list);
2582 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2583 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2584 
2585 	/*
2586 	 * Unreference resources outside of the cmdbuf_mutex to
2587 	 * avoid deadlocks in resource destruction paths.
2588 	 */
2589 	vmw_resource_list_unreference(&resource_list);
2590 
2591 	return 0;
2592 
2593 out_unlock_binding:
2594 	mutex_unlock(&dev_priv->binding_mutex);
2595 out_err:
2596 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2597 out_err_nores:
2598 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
2599 	vmw_resource_relocations_free(&sw_context->res_relocations);
2600 	vmw_free_relocations(sw_context);
2601 	vmw_clear_validations(sw_context);
2602 	if (unlikely(dev_priv->pinned_bo != NULL &&
2603 		     !dev_priv->query_cid_valid))
2604 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2605 out_unlock:
2606 	list_splice_init(&sw_context->resource_list, &resource_list);
2607 	error_resource = sw_context->error_resource;
2608 	sw_context->error_resource = NULL;
2609 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2610 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2611 
2612 	/*
2613 	 * Unreference resources outside of the cmdbuf_mutex to
2614 	 * avoid deadlocks in resource destruction paths.
2615 	 */
2616 	vmw_resource_list_unreference(&resource_list);
2617 	if (unlikely(error_resource != NULL))
2618 		vmw_resource_unreference(&error_resource);
2619 
2620 	return ret;
2621 }
2622 
2623 /**
2624  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2625  *
2626  * @dev_priv: The device private structure.
2627  *
2628  * This function is called to idle the fifo and unpin the query buffer
2629  * if the normal way to do this hits an error, which should typically be
2630  * extremely rare.
2631  */
2632 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2633 {
2634 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2635 
2636 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2637 	vmw_bo_pin(dev_priv->pinned_bo, false);
2638 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2639 	dev_priv->dummy_query_bo_pinned = false;
2640 }
2641 
2642 
2643 /**
2644  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2645  * query bo.
2646  *
2647  * @dev_priv: The device private structure.
2648  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2649  * _after_ a query barrier that flushes all queries touching the current
2650  * buffer pointed to by @dev_priv->pinned_bo
2651  *
2652  * This function should be used to unpin the pinned query bo, or
2653  * as a query barrier when we need to make sure that all queries have
2654  * finished before the next fifo command. (For example on hardware
2655  * context destructions where the hardware may otherwise leak unfinished
2656  * queries).
2657  *
2658  * This function does not return any failure codes, but make attempts
2659  * to do safe unpinning in case of errors.
2660  *
2661  * The function will synchronize on the previous query barrier, and will
2662  * thus not finish until that barrier has executed.
2663  *
2664  * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
2665  * before calling this function.
2666  */
2667 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2668 				     struct vmw_fence_obj *fence)
2669 {
2670 	int ret = 0;
2671 	struct list_head validate_list;
2672 	struct ttm_validate_buffer pinned_val, query_val;
2673 	struct vmw_fence_obj *lfence = NULL;
2674 	struct ww_acquire_ctx ticket;
2675 
2676 	if (dev_priv->pinned_bo == NULL)
2677 		goto out_unlock;
2678 
2679 	INIT_LIST_HEAD(&validate_list);
2680 
2681 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2682 	list_add_tail(&pinned_val.head, &validate_list);
2683 
2684 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2685 	list_add_tail(&query_val.head, &validate_list);
2686 
2687 	do {
2688 		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2689 	} while (ret == -ERESTARTSYS);
2690 
2691 	if (unlikely(ret != 0)) {
2692 		vmw_execbuf_unpin_panic(dev_priv);
2693 		goto out_no_reserve;
2694 	}
2695 
2696 	if (dev_priv->query_cid_valid) {
2697 		BUG_ON(fence != NULL);
2698 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2699 		if (unlikely(ret != 0)) {
2700 			vmw_execbuf_unpin_panic(dev_priv);
2701 			goto out_no_emit;
2702 		}
2703 		dev_priv->query_cid_valid = false;
2704 	}
2705 
2706 	vmw_bo_pin(dev_priv->pinned_bo, false);
2707 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2708 	dev_priv->dummy_query_bo_pinned = false;
2709 
2710 	if (fence == NULL) {
2711 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2712 						  NULL);
2713 		fence = lfence;
2714 	}
2715 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2716 	if (lfence != NULL)
2717 		vmw_fence_obj_unreference(&lfence);
2718 
2719 	ttm_bo_unref(&query_val.bo);
2720 	ttm_bo_unref(&pinned_val.bo);
2721 	ttm_bo_unref(&dev_priv->pinned_bo);
2722 
2723 out_unlock:
2724 	return;
2725 
2726 out_no_emit:
2727 	ttm_eu_backoff_reservation(&ticket, &validate_list);
2728 out_no_reserve:
2729 	ttm_bo_unref(&query_val.bo);
2730 	ttm_bo_unref(&pinned_val.bo);
2731 	ttm_bo_unref(&dev_priv->pinned_bo);
2732 }
2733 
2734 /**
2735  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2736  * query bo.
2737  *
2738  * @dev_priv: The device private structure.
2739  *
2740  * This function should be used to unpin the pinned query bo, or
2741  * as a query barrier when we need to make sure that all queries have
2742  * finished before the next fifo command. (For example on hardware
2743  * context destructions where the hardware may otherwise leak unfinished
2744  * queries).
2745  *
2746  * This function does not return any failure codes, but make attempts
2747  * to do safe unpinning in case of errors.
2748  *
2749  * The function will synchronize on the previous query barrier, and will
2750  * thus not finish until that barrier has executed.
2751  */
2752 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2753 {
2754 	mutex_lock(&dev_priv->cmdbuf_mutex);
2755 	if (dev_priv->query_cid_valid)
2756 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2757 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2758 }
2759 
2760 
2761 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2762 		      struct drm_file *file_priv)
2763 {
2764 	struct vmw_private *dev_priv = vmw_priv(dev);
2765 	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2766 	int ret;
2767 
2768 	/*
2769 	 * This will allow us to extend the ioctl argument while
2770 	 * maintaining backwards compatibility:
2771 	 * We take different code paths depending on the value of
2772 	 * arg->version.
2773 	 */
2774 
2775 	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2776 		DRM_ERROR("Incorrect execbuf version.\n");
2777 		DRM_ERROR("You're running outdated experimental "
2778 			  "vmwgfx user-space drivers.");
2779 		return -EINVAL;
2780 	}
2781 
2782 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2783 	if (unlikely(ret != 0))
2784 		return ret;
2785 
2786 	ret = vmw_execbuf_process(file_priv, dev_priv,
2787 				  (void __user *)(unsigned long)arg->commands,
2788 				  NULL, arg->command_size, arg->throttle_us,
2789 				  (void __user *)(unsigned long)arg->fence_rep,
2790 				  NULL);
2791 
2792 	if (unlikely(ret != 0))
2793 		goto out_unlock;
2794 
2795 	vmw_kms_cursor_post_execbuf(dev_priv);
2796 
2797 out_unlock:
2798 	ttm_read_unlock(&dev_priv->reservation_sem);
2799 	return ret;
2800 }
2801