xref: /openbmc/linux/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c (revision 7051924f771722c6dd235e693742cda6488ac700)
1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32 
33 #define VMW_RES_HT_ORDER 12
34 
35 /**
36  * struct vmw_resource_relocation - Relocation info for resources
37  *
38  * @head: List head for the software context's relocation list.
39  * @res: Non-ref-counted pointer to the resource.
40  * @offset: Offset into the command buffer, in units of 4-byte entries,
41  * where the id that needs fixup is located.
42  */
43 struct vmw_resource_relocation {
44 	struct list_head head;
45 	const struct vmw_resource *res;
46 	unsigned long offset;
47 };
48 
49 /**
50  * struct vmw_resource_val_node - Validation info for resources
51  *
52  * @head: List head for the software context's resource list.
53  * @hash: Hash entry for quick resource to val_node lookup.
54  * @res: Ref-counted pointer to the resource.
56  * @new_backup: Refcounted pointer to the new backup buffer.
57  * @staged_bindings: If @res is a context, tracks bindings set up during
58  * the command batch. Otherwise NULL.
59  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
60  * @first_usage: Set to true the first time the resource is referenced in
61  * the command stream.
62  * @no_buffer_needed: This resource does not need a backup buffer allocated
63  * on reservation; the command stream will provide one.
64  */
65 struct vmw_resource_val_node {
66 	struct list_head head;
67 	struct drm_hash_item hash;
68 	struct vmw_resource *res;
69 	struct vmw_dma_buffer *new_backup;
70 	struct vmw_ctx_binding_state *staged_bindings;
71 	unsigned long new_backup_offset;
72 	bool first_usage;
73 	bool no_buffer_needed;
74 };
75 
76 /**
77  * struct vmw_cmd_entry - Describe a command for the verifier
78  *
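 * @func: Call back to validate the command.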
79  * @user_allow: Whether allowed from the execbuf ioctl.
80  * @gb_disable: Whether disabled if guest-backed objects are available.
81  * @gb_enable: Whether enabled iff guest-backed objects are available.
82  */
83 struct vmw_cmd_entry {
84 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
85 		     SVGA3dCmdHeader *);
86 	bool user_allow;
87 	bool gb_disable;
88 	bool gb_enable;
89 };
90 
91 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
92 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93 				       (_gb_disable), (_gb_enable)}
94 
95 /**
96  * vmw_resource_list_unreserve - Unreserve resources previously reserved for
97  * command submission.
98  *
99  * @list: List of resources to unreserve.
100  * @backoff: Whether command submission failed.
101  */
102 static void vmw_resource_list_unreserve(struct list_head *list,
103 					bool backoff)
104 {
105 	struct vmw_resource_val_node *val;
106 
107 	list_for_each_entry(val, list, head) {
108 		struct vmw_resource *res = val->res;
109 		struct vmw_dma_buffer *new_backup =
110 			backoff ? NULL : val->new_backup;
111 
112 		/*
113 		 * Transfer staged context bindings to the
114 		 * persistent context binding tracker.
115 		 */
116 		if (unlikely(val->staged_bindings)) {
117 			if (!backoff) {
118 				vmw_context_binding_state_transfer
119 					(val->res, val->staged_bindings);
120 			}
121 			kfree(val->staged_bindings);
122 			val->staged_bindings = NULL;
123 		}
124 		vmw_resource_unreserve(res, new_backup,
125 			val->new_backup_offset);
126 		vmw_dmabuf_unreference(&val->new_backup);
127 	}
128 }
129 
130 
131 /**
132  * vmw_resource_val_add - Add a resource to the software context's
133  * resource list if it's not already on it.
134  *
135  * @sw_context: Pointer to the software context.
136  * @res: Pointer to the resource.
137  * @p_node: If non-NULL on entry, points on successful return to a valid
138  * pointer to a struct vmw_resource_val_node.
139  */
140 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141 				struct vmw_resource *res,
142 				struct vmw_resource_val_node **p_node)
143 {
144 	struct vmw_resource_val_node *node;
145 	struct drm_hash_item *hash;
146 	int ret;
147 
148 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
149 				    &hash) == 0)) {
150 		node = container_of(hash, struct vmw_resource_val_node, hash);
151 		node->first_usage = false;
152 		if (unlikely(p_node != NULL))
153 			*p_node = node;
154 		return 0;
155 	}
156 
157 	node = kzalloc(sizeof(*node), GFP_KERNEL);
158 	if (unlikely(node == NULL)) {
159 		DRM_ERROR("Failed to allocate a resource validation "
160 			  "entry.\n");
161 		return -ENOMEM;
162 	}
163 
164 	node->hash.key = (unsigned long) res;
165 	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
166 	if (unlikely(ret != 0)) {
167 		DRM_ERROR("Failed to initialize a resource validation "
168 			  "entry.\n");
169 		kfree(node);
170 		return ret;
171 	}
172 	list_add_tail(&node->head, &sw_context->resource_list);
173 	node->res = vmw_resource_reference(res);
174 	node->first_usage = true;
175 
176 	if (unlikely(p_node != NULL))
177 		*p_node = node;
178 
179 	return 0;
180 }
181 
182 /**
183  * vmw_resource_context_res_add - Put resources previously bound to a context on
184  * the validation list
185  *
186  * @dev_priv: Pointer to a device private structure
187  * @sw_context: Pointer to a software context used for this command submission
188  * @ctx: Pointer to the context resource
189  *
190  * This function puts all resources that were previously bound to @ctx on
191  * the resource validation list. This is part of the context state re-emission.
192  */
193 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
194 					struct vmw_sw_context *sw_context,
195 					struct vmw_resource *ctx)
196 {
197 	struct list_head *binding_list;
198 	struct vmw_ctx_binding *entry;
199 	int ret = 0;
200 	struct vmw_resource *res;
201 
202 	mutex_lock(&dev_priv->binding_mutex);
203 	binding_list = vmw_context_binding_list(ctx);
204 
205 	list_for_each_entry(entry, binding_list, ctx_list) {
206 		res = vmw_resource_reference_unless_doomed(entry->bi.res);
207 		if (unlikely(res == NULL))
208 			continue;
209 
210 		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
211 		vmw_resource_unreference(&res);
212 		if (unlikely(ret != 0))
213 			break;
214 	}
215 
216 	mutex_unlock(&dev_priv->binding_mutex);
217 	return ret;
218 }
219 
220 /**
221  * vmw_resource_relocation_add - Add a relocation to the relocation list
222  *
223  * @list: Pointer to head of relocation list.
224  * @res: The resource.
225  * @offset: Offset into the command buffer currently being parsed where the
226  * id that needs fixup is located. Granularity is 4 bytes.
227  */
228 static int vmw_resource_relocation_add(struct list_head *list,
229 				       const struct vmw_resource *res,
230 				       unsigned long offset)
231 {
232 	struct vmw_resource_relocation *rel;
233 
234 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
235 	if (unlikely(rel == NULL)) {
236 		DRM_ERROR("Failed to allocate a resource relocation.\n");
237 		return -ENOMEM;
238 	}
239 
240 	rel->res = res;
241 	rel->offset = offset;
242 	list_add_tail(&rel->head, list);
243 
244 	return 0;
245 }
246 
247 /**
248  * vmw_resource_relocations_free - Free all relocations on a list
249  *
250  * @list: Pointer to the head of the relocation list.
251  */
252 static void vmw_resource_relocations_free(struct list_head *list)
253 {
254 	struct vmw_resource_relocation *rel, *n;
255 
256 	list_for_each_entry_safe(rel, n, list, head) {
257 		list_del(&rel->head);
258 		kfree(rel);
259 	}
260 }
261 
262 /**
263  * vmw_resource_relocations_apply - Apply all relocations on a list
264  *
265  * @cb: Pointer to the start of the command buffer being patched. This need
266  * not be the same buffer as the one being parsed when the relocation
267  * list was built, but the contents must be the same modulo the
268  * resource ids.
269  * @list: Pointer to the head of the relocation list.
270  */
271 static void vmw_resource_relocations_apply(uint32_t *cb,
272 					   struct list_head *list)
273 {
274 	struct vmw_resource_relocation *rel;
275 
276 	list_for_each_entry(rel, list, head) {
277 		if (likely(rel->res != NULL))
278 			cb[rel->offset] = rel->res->id;
279 		else
280 			cb[rel->offset] = SVGA_3D_CMD_NOP;
281 	}
282 }
283 
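/**
 * vmw_cmd_invalid - Verifier callback for commands that may not be
 * submitted through the execbuf ioctl in the current configuration.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */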
284 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
285 			   struct vmw_sw_context *sw_context,
286 			   SVGA3dCmdHeader *header)
287 {
288 	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
289 }
290 
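/**
 * vmw_cmd_ok - Verifier callback for commands that need no further checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */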
291 static int vmw_cmd_ok(struct vmw_private *dev_priv,
292 		      struct vmw_sw_context *sw_context,
293 		      SVGA3dCmdHeader *header)
294 {
295 	return 0;
296 }
297 
298 /**
299  * vmw_bo_to_validate_list - add a bo to a validate list
300  *
301  * @sw_context: The software context used for this command submission batch.
302  * @bo: The buffer object to add.
303  * @validate_as_mob: Validate this buffer as a MOB.
304  * @p_val_node: If non-NULL, will be updated with the validate node number
305  * on return.
306  *
307  * Returns -EINVAL if the limit of number of buffer objects per command
308  * submission is reached.
309  */
310 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
311 				   struct ttm_buffer_object *bo,
312 				   bool validate_as_mob,
313 				   uint32_t *p_val_node)
314 {
315 	uint32_t val_node;
316 	struct vmw_validate_buffer *vval_buf;
317 	struct ttm_validate_buffer *val_buf;
318 	struct drm_hash_item *hash;
319 	int ret;
320 
321 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
322 				    &hash) == 0)) {
323 		vval_buf = container_of(hash, struct vmw_validate_buffer,
324 					hash);
325 		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
326 			DRM_ERROR("Inconsistent buffer usage.\n");
327 			return -EINVAL;
328 		}
329 		val_buf = &vval_buf->base;
330 		val_node = vval_buf - sw_context->val_bufs;
331 	} else {
332 		val_node = sw_context->cur_val_buf;
333 		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
334 			DRM_ERROR("Max number of DMA buffers per submission "
335 				  "exceeded.\n");
336 			return -EINVAL;
337 		}
338 		vval_buf = &sw_context->val_bufs[val_node];
339 		vval_buf->hash.key = (unsigned long) bo;
340 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
341 		if (unlikely(ret != 0)) {
342 			DRM_ERROR("Failed to initialize a buffer validation "
343 				  "entry.\n");
344 			return ret;
345 		}
346 		++sw_context->cur_val_buf;
347 		val_buf = &vval_buf->base;
348 		val_buf->bo = ttm_bo_reference(bo);
349 		val_buf->reserved = false;
350 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
351 		vval_buf->validate_as_mob = validate_as_mob;
352 	}
353 
354 	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
355 
356 	if (p_val_node)
357 		*p_val_node = val_node;
358 
359 	return 0;
360 }
361 
362 /**
363  * vmw_resources_reserve - Reserve all resources on the sw_context's
364  * resource list.
365  *
366  * @sw_context: Pointer to the software context.
367  *
368  * Note that since VMware's command submission currently is protected by
369  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
370  * since only a single thread at a time will attempt this.
371  */
372 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
373 {
374 	struct vmw_resource_val_node *val;
375 	int ret;
376 
377 	list_for_each_entry(val, &sw_context->resource_list, head) {
378 		struct vmw_resource *res = val->res;
379 
380 		ret = vmw_resource_reserve(res, val->no_buffer_needed);
381 		if (unlikely(ret != 0))
382 			return ret;
383 
384 		if (res->backup) {
385 			struct ttm_buffer_object *bo = &res->backup->base;
386 
387 			ret = vmw_bo_to_validate_list
388 				(sw_context, bo,
389 				 vmw_resource_needs_backup(res), NULL);
390 
391 			if (unlikely(ret != 0))
392 				return ret;
393 		}
394 	}
395 	return 0;
396 }
397 
398 /**
399  * vmw_resources_validate - Validate all resources on the sw_context's
400  * resource list.
401  *
402  * @sw_context: Pointer to the software context.
403  *
404  * Before this function is called, all resource backup buffers must have
405  * been validated.
406  */
407 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
408 {
409 	struct vmw_resource_val_node *val;
410 	int ret;
411 
412 	list_for_each_entry(val, &sw_context->resource_list, head) {
413 		struct vmw_resource *res = val->res;
414 
415 		ret = vmw_resource_validate(res);
416 		if (unlikely(ret != 0)) {
417 			if (ret != -ERESTARTSYS)
418 				DRM_ERROR("Failed to validate resource.\n");
419 			return ret;
420 		}
421 	}
422 	return 0;
423 }
424 
425 
426 /**
427  * vmw_cmd_res_reloc_add - Add a resource to a software context's
428  * relocation and validation lists.
429  *
430  * @dev_priv: Pointer to a struct vmw_private identifying the device.
431  * @sw_context: Pointer to the software context.
432  * @res_type: Resource type.
433  * @id_loc: Pointer to where the id that needs translation is located.
434  * @res: Valid pointer to a struct vmw_resource.
435  * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
436  * used for this resource is returned here.
437  */
438 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
439 				 struct vmw_sw_context *sw_context,
440 				 enum vmw_res_type res_type,
441 				 uint32_t *id_loc,
442 				 struct vmw_resource *res,
443 				 struct vmw_resource_val_node **p_val)
444 {
445 	int ret;
446 	struct vmw_resource_val_node *node;
447 
448 	*p_val = NULL;
449 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
450 					  res,
451 					  id_loc - sw_context->buf_start);
452 	if (unlikely(ret != 0))
453 		return ret;
454 
455 	ret = vmw_resource_val_add(sw_context, res, &node);
456 	if (unlikely(ret != 0))
457 		return ret;
458 
459 	if (res_type == vmw_res_context && dev_priv->has_mob &&
460 	    node->first_usage) {
461 
462 		/*
463 		 * Put contexts first on the list to be able to exit
464 		 * list traversal for contexts early.
465 		 */
466 		list_del(&node->head);
467 		list_add(&node->head, &sw_context->resource_list);
468 
469 		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
470 		if (unlikely(ret != 0))
471 			return ret;
472 		node->staged_bindings =
473 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
474 		if (node->staged_bindings == NULL) {
475 			DRM_ERROR("Failed to allocate context binding "
476 				  "information.\n");
477 			return -ENOMEM;
478 		}
479 		INIT_LIST_HEAD(&node->staged_bindings->list);
480 	}
481 
482 	if (p_val)
483 		*p_val = node;
484 
485 	return 0;
486 }
487 
488 
489 /**
490  * vmw_cmd_res_check - Check that a resource is present and if so, put it
491  * on the resource validate list unless it's already there.
492  *
493  * @dev_priv: Pointer to a device private structure.
494  * @sw_context: Pointer to the software context.
495  * @res_type: Resource type.
496  * @converter: User-space visible type-specific information.
497  * @id_loc: Pointer to the location in the command buffer currently being
498  * parsed from where the user-space resource id handle is located.
499  * @p_val: Pointer to pointer to resource validation node. Populated
500  * on exit.
501  */
502 static int
503 vmw_cmd_res_check(struct vmw_private *dev_priv,
504 		  struct vmw_sw_context *sw_context,
505 		  enum vmw_res_type res_type,
506 		  const struct vmw_user_resource_conv *converter,
507 		  uint32_t *id_loc,
508 		  struct vmw_resource_val_node **p_val)
509 {
510 	struct vmw_res_cache_entry *rcache =
511 		&sw_context->res_cache[res_type];
512 	struct vmw_resource *res;
513 	struct vmw_resource_val_node *node;
514 	int ret;
515 
516 	if (*id_loc == SVGA3D_INVALID_ID) {
517 		if (p_val)
518 			*p_val = NULL;
519 		if (res_type == vmw_res_context) {
520 			DRM_ERROR("Illegal context invalid id.\n");
521 			return -EINVAL;
522 		}
523 		return 0;
524 	}
525 
526 	/*
527 	 * Fastpath in case of repeated commands referencing the same
528 	 * resource
529 	 */
530 
531 	if (likely(rcache->valid && *id_loc == rcache->handle)) {
532 		const struct vmw_resource *res = rcache->res;
533 
534 		rcache->node->first_usage = false;
535 		if (p_val)
536 			*p_val = rcache->node;
537 
538 		return vmw_resource_relocation_add
539 			(&sw_context->res_relocations, res,
540 			 id_loc - sw_context->buf_start);
541 	}
542 
543 	ret = vmw_user_resource_lookup_handle(dev_priv,
544 					      sw_context->fp->tfile,
545 					      *id_loc,
546 					      converter,
547 					      &res);
548 	if (unlikely(ret != 0)) {
549 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
550 			  (unsigned) *id_loc);
551 		dump_stack();
552 		return ret;
553 	}
554 
555 	rcache->valid = true;
556 	rcache->res = res;
557 	rcache->handle = *id_loc;
558 
559 	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
560 				    res, &node);
561 	if (unlikely(ret != 0))
562 		goto out_no_reloc;
563 
564 	rcache->node = node;
565 	if (p_val)
566 		*p_val = node;
567 	vmw_resource_unreference(&res);
568 	return 0;
569 
570 out_no_reloc:
571 	BUG_ON(sw_context->error_resource != NULL);
572 	sw_context->error_resource = res;
573 
574 	return ret;
575 }
576 
577 /**
578  * vmw_rebind_contexts - Rebind all resources previously bound to
579  * referenced contexts.
580  *
581  * @sw_context: Pointer to the software context.
582  *
583  * Rebind context binding points that have been scrubbed because of eviction.
584  */
585 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
586 {
587 	struct vmw_resource_val_node *val;
588 	int ret;
589 
590 	list_for_each_entry(val, &sw_context->resource_list, head) {
591 		if (unlikely(!val->staged_bindings))
592 			break;
593 
594 		ret = vmw_context_rebind_all(val->res);
595 		if (unlikely(ret != 0)) {
596 			if (ret != -ERESTARTSYS)
597 				DRM_ERROR("Failed to rebind context.\n");
598 			return ret;
599 		}
600 	}
601 
602 	return 0;
603 }
604 
605 /**
606  * vmw_cmd_cid_check - Check a command header for valid context information.
607  *
608  * @dev_priv: Pointer to a device private structure.
609  * @sw_context: Pointer to the software context.
610  * @header: A command header with an embedded user-space context handle.
611  *
612  * Convenience function: Call vmw_cmd_res_check with the user-space context
613  * handle embedded in @header.
614  */
615 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
616 			     struct vmw_sw_context *sw_context,
617 			     SVGA3dCmdHeader *header)
618 {
619 	struct vmw_cid_cmd {
620 		SVGA3dCmdHeader header;
621 		uint32_t cid;
622 	} *cmd;
623 
624 	cmd = container_of(header, struct vmw_cid_cmd, header);
625 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
626 				 user_context_converter, &cmd->cid, NULL);
627 }
628 
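/**
 * vmw_cmd_set_render_target_check - Validate a set render target command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context and the render target surface referenced by the command
 * and, for guest-backed contexts, records the render target binding.
 */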
629 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
630 					   struct vmw_sw_context *sw_context,
631 					   SVGA3dCmdHeader *header)
632 {
633 	struct vmw_sid_cmd {
634 		SVGA3dCmdHeader header;
635 		SVGA3dCmdSetRenderTarget body;
636 	} *cmd;
637 	struct vmw_resource_val_node *ctx_node;
638 	struct vmw_resource_val_node *res_node;
639 	int ret;
640 
641 	cmd = container_of(header, struct vmw_sid_cmd, header);
642 
643 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
644 				user_context_converter, &cmd->body.cid,
645 				&ctx_node);
646 	if (unlikely(ret != 0))
647 		return ret;
648 
649 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
650 				user_surface_converter,
651 				&cmd->body.target.sid, &res_node);
652 	if (unlikely(ret != 0))
653 		return ret;
654 
655 	if (dev_priv->has_mob) {
656 		struct vmw_ctx_bindinfo bi;
657 
658 		bi.ctx = ctx_node->res;
659 		bi.res = res_node ? res_node->res : NULL;
660 		bi.bt = vmw_ctx_binding_rt;
661 		bi.i1.rt_type = cmd->body.type;
662 		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
663 	}
664 
665 	return 0;
666 }
667 
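/**
 * vmw_cmd_surface_copy_check - Validate a surface copy command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks both the source and the destination surface of the copy.
 */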
668 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
669 				      struct vmw_sw_context *sw_context,
670 				      SVGA3dCmdHeader *header)
671 {
672 	struct vmw_sid_cmd {
673 		SVGA3dCmdHeader header;
674 		SVGA3dCmdSurfaceCopy body;
675 	} *cmd;
676 	int ret;
677 
678 	cmd = container_of(header, struct vmw_sid_cmd, header);
679 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
680 				user_surface_converter,
681 				&cmd->body.src.sid, NULL);
682 	if (unlikely(ret != 0))
683 		return ret;
684 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
685 				 user_surface_converter,
686 				 &cmd->body.dest.sid, NULL);
687 }
688 
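/**
 * vmw_cmd_stretch_blt_check - Validate a surface stretch blit command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks both the source and the destination surface of the blit.
 */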
689 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
690 				     struct vmw_sw_context *sw_context,
691 				     SVGA3dCmdHeader *header)
692 {
693 	struct vmw_sid_cmd {
694 		SVGA3dCmdHeader header;
695 		SVGA3dCmdSurfaceStretchBlt body;
696 	} *cmd;
697 	int ret;
698 
699 	cmd = container_of(header, struct vmw_sid_cmd, header);
700 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
701 				user_surface_converter,
702 				&cmd->body.src.sid, NULL);
703 	if (unlikely(ret != 0))
704 		return ret;
705 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
706 				 user_surface_converter,
707 				 &cmd->body.dest.sid, NULL);
708 }
709 
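/**
 * vmw_cmd_blt_surf_screen_check - Validate a blit surface-to-screen command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the source image surface of the blit.
 */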
710 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
711 					 struct vmw_sw_context *sw_context,
712 					 SVGA3dCmdHeader *header)
713 {
714 	struct vmw_sid_cmd {
715 		SVGA3dCmdHeader header;
716 		SVGA3dCmdBlitSurfaceToScreen body;
717 	} *cmd;
718 
719 	cmd = container_of(header, struct vmw_sid_cmd, header);
720 
721 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
722 				 user_surface_converter,
723 				 &cmd->body.srcImage.sid, NULL);
724 }
725 
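/**
 * vmw_cmd_present_check - Validate a present command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the surface to be presented.
 */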
726 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
727 				 struct vmw_sw_context *sw_context,
728 				 SVGA3dCmdHeader *header)
729 {
730 	struct vmw_sid_cmd {
731 		SVGA3dCmdHeader header;
732 		SVGA3dCmdPresent body;
733 	} *cmd;
734 
735 
736 	cmd = container_of(header, struct vmw_sid_cmd, header);
737 
738 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
739 				 user_surface_converter, &cmd->body.sid,
740 				 NULL);
741 }
742 
743 /**
744  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
745  *
746  * @dev_priv: The device private structure.
747  * @new_query_bo: The new buffer holding query results.
748  * @sw_context: The software context used for this command submission.
749  *
750  * This function checks whether @new_query_bo is suitable for holding
751  * query results, and if another buffer currently is pinned for query
752  * results. If so, the function prepares the state of @sw_context for
753  * switching pinned buffers after successful submission of the current
754  * command batch.
755  */
756 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
757 				       struct ttm_buffer_object *new_query_bo,
758 				       struct vmw_sw_context *sw_context)
759 {
760 	struct vmw_res_cache_entry *ctx_entry =
761 		&sw_context->res_cache[vmw_res_context];
762 	int ret;
763 
764 	BUG_ON(!ctx_entry->valid);
765 	sw_context->last_query_ctx = ctx_entry->res;
766 
767 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
768 
769 		if (unlikely(new_query_bo->num_pages > 4)) {
770 			DRM_ERROR("Query buffer too large.\n");
771 			return -EINVAL;
772 		}
773 
774 		if (unlikely(sw_context->cur_query_bo != NULL)) {
775 			sw_context->needs_post_query_barrier = true;
776 			ret = vmw_bo_to_validate_list(sw_context,
777 						      sw_context->cur_query_bo,
778 						      dev_priv->has_mob, NULL);
779 			if (unlikely(ret != 0))
780 				return ret;
781 		}
782 		sw_context->cur_query_bo = new_query_bo;
783 
784 		ret = vmw_bo_to_validate_list(sw_context,
785 					      dev_priv->dummy_query_bo,
786 					      dev_priv->has_mob, NULL);
787 		if (unlikely(ret != 0))
788 			return ret;
789 
790 	}
791 
792 	return 0;
793 }
794 
795 
796 /**
797  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
798  *
799  * @dev_priv: The device private structure.
800  * @sw_context: The software context used for this command submission batch.
801  *
802  * This function will check if we're switching query buffers, and will then,
803  * issue a dummy occlusion query wait used as a query barrier. When the fence
804  * object following that query wait has signaled, we are sure that all
805  * preceding queries have finished, and the old query buffer can be unpinned.
806  * However, since both the new query buffer and the old one are fenced with
807  * that fence, we can do an asynchronous unpin now, and be sure that the
808  * old query buffer won't be moved until the fence has signaled.
809  *
810  * As mentioned above, both the new and the old query buffers need to be fenced
811  * using a sequence emitted *after* calling this function.
812  */
813 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
814 				     struct vmw_sw_context *sw_context)
815 {
816 	/*
817 	 * The validate list should still hold references to all
818 	 * contexts here.
819 	 */
820 
821 	if (sw_context->needs_post_query_barrier) {
822 		struct vmw_res_cache_entry *ctx_entry =
823 			&sw_context->res_cache[vmw_res_context];
824 		struct vmw_resource *ctx;
825 		int ret;
826 
827 		BUG_ON(!ctx_entry->valid);
828 		ctx = ctx_entry->res;
829 
830 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
831 
832 		if (unlikely(ret != 0))
833 			DRM_ERROR("Out of fifo space for dummy query.\n");
834 	}
835 
836 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
837 		if (dev_priv->pinned_bo) {
838 			vmw_bo_pin(dev_priv->pinned_bo, false);
839 			ttm_bo_unref(&dev_priv->pinned_bo);
840 		}
841 
842 		if (!sw_context->needs_post_query_barrier) {
843 			vmw_bo_pin(sw_context->cur_query_bo, true);
844 
845 			/*
846 			 * We also pin the dummy_query_bo buffer so that we
847 			 * don't need to validate it when emitting
848 			 * dummy queries in context destroy paths.
849 			 */
850 
851 			vmw_bo_pin(dev_priv->dummy_query_bo, true);
852 			dev_priv->dummy_query_bo_pinned = true;
853 
854 			BUG_ON(sw_context->last_query_ctx == NULL);
855 			dev_priv->query_cid = sw_context->last_query_ctx->id;
856 			dev_priv->query_cid_valid = true;
857 			dev_priv->pinned_bo =
858 				ttm_bo_reference(sw_context->cur_query_bo);
859 		}
860 	}
861 }
862 
863 /**
864  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
865  * handle to a MOB id.
866  *
867  * @dev_priv: Pointer to a device private structure.
868  * @sw_context: The software context used for this command batch validation.
869  * @id: Pointer to the user-space handle to be translated.
870  * @vmw_bo_p: Points to a location that, on successful return will carry
871  * a reference-counted pointer to the DMA buffer identified by the
872  * user-space handle in @id.
873  *
874  * This function saves information needed to translate a user-space buffer
875  * handle to a MOB id. The translation does not take place immediately, but
876  * during a call to vmw_apply_relocations(). This function builds a relocation
877  * list and a list of buffers to validate. The former needs to be freed using
878  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
879  * needs to be freed using vmw_clear_validations.
880  */
881 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
882 				 struct vmw_sw_context *sw_context,
883 				 SVGAMobId *id,
884 				 struct vmw_dma_buffer **vmw_bo_p)
885 {
886 	struct vmw_dma_buffer *vmw_bo = NULL;
887 	struct ttm_buffer_object *bo;
888 	uint32_t handle = *id;
889 	struct vmw_relocation *reloc;
890 	int ret;
891 
892 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
893 	if (unlikely(ret != 0)) {
894 		DRM_ERROR("Could not find or use MOB buffer.\n");
895 		return -EINVAL;
896 	}
897 	bo = &vmw_bo->base;
898 
899 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
900 		DRM_ERROR("Max number relocations per submission"
901 			  " exceeded\n");
902 		ret = -EINVAL;
903 		goto out_no_reloc;
904 	}
905 
906 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
907 	reloc->mob_loc = id;
908 	reloc->location = NULL;
909 
910 	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
911 	if (unlikely(ret != 0))
912 		goto out_no_reloc;
913 
914 	*vmw_bo_p = vmw_bo;
915 	return 0;
916 
917 out_no_reloc:
918 	vmw_dmabuf_unreference(&vmw_bo);
919 	*vmw_bo_p = NULL;
920 	return ret;
921 }
922 
923 /**
924  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
925  * handle to a valid SVGAGuestPtr
926  *
927  * @dev_priv: Pointer to a device private structure.
928  * @sw_context: The software context used for this command batch validation.
929  * @ptr: Pointer to the user-space handle to be translated.
930  * @vmw_bo_p: Points to a location that, on successful return will carry
931  * a reference-counted pointer to the DMA buffer identified by the
932  * user-space handle in @ptr.
933  *
934  * This function saves information needed to translate a user-space buffer
935  * handle to a valid SVGAGuestPtr. The translation does not take place
936  * immediately, but during a call to vmw_apply_relocations().
937  * This function builds a relocation list and a list of buffers to validate.
938  * The former needs to be freed using either vmw_apply_relocations() or
939  * vmw_free_relocations(). The latter needs to be freed using
940  * vmw_clear_validations.
941  */
942 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
943 				   struct vmw_sw_context *sw_context,
944 				   SVGAGuestPtr *ptr,
945 				   struct vmw_dma_buffer **vmw_bo_p)
946 {
947 	struct vmw_dma_buffer *vmw_bo = NULL;
948 	struct ttm_buffer_object *bo;
949 	uint32_t handle = ptr->gmrId;
950 	struct vmw_relocation *reloc;
951 	int ret;
952 
953 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
954 	if (unlikely(ret != 0)) {
955 		DRM_ERROR("Could not find or use GMR region.\n");
956 		return -EINVAL;
957 	}
958 	bo = &vmw_bo->base;
959 
960 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
961 		DRM_ERROR("Max number relocations per submission"
962 			  " exceeded\n");
963 		ret = -EINVAL;
964 		goto out_no_reloc;
965 	}
966 
967 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
968 	reloc->location = ptr;
969 
970 	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
971 	if (unlikely(ret != 0))
972 		goto out_no_reloc;
973 
974 	*vmw_bo_p = vmw_bo;
975 	return 0;
976 
977 out_no_reloc:
978 	vmw_dmabuf_unreference(&vmw_bo);
979 	*vmw_bo_p = NULL;
980 	return ret;
981 }
982 
983 /**
984  * vmw_cmd_begin_gb_query - Validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
985  *
986  * @dev_priv: Pointer to a device private struct.
987  * @sw_context: The software context used for this command submission.
988  * @header: Pointer to the command header in the command stream.
989  */
990 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
991 				  struct vmw_sw_context *sw_context,
992 				  SVGA3dCmdHeader *header)
993 {
994 	struct vmw_begin_gb_query_cmd {
995 		SVGA3dCmdHeader header;
996 		SVGA3dCmdBeginGBQuery q;
997 	} *cmd;
998 
999 	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1000 			   header);
1001 
1002 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1003 				 user_context_converter, &cmd->q.cid,
1004 				 NULL);
1005 }
1006 
1007 /**
1008  * vmw_cmd_begin_query - Validate an SVGA_3D_CMD_BEGIN_QUERY command.
1009  *
1010  * @dev_priv: Pointer to a device private struct.
1011  * @sw_context: The software context used for this command submission.
1012  * @header: Pointer to the command header in the command stream.
1013  */
1014 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1015 			       struct vmw_sw_context *sw_context,
1016 			       SVGA3dCmdHeader *header)
1017 {
1018 	struct vmw_begin_query_cmd {
1019 		SVGA3dCmdHeader header;
1020 		SVGA3dCmdBeginQuery q;
1021 	} *cmd;
1022 
1023 	cmd = container_of(header, struct vmw_begin_query_cmd,
1024 			   header);
1025 
1026 	if (unlikely(dev_priv->has_mob)) {
1027 		struct {
1028 			SVGA3dCmdHeader header;
1029 			SVGA3dCmdBeginGBQuery q;
1030 		} gb_cmd;
1031 
1032 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1033 
1034 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1035 		gb_cmd.header.size = cmd->header.size;
1036 		gb_cmd.q.cid = cmd->q.cid;
1037 		gb_cmd.q.type = cmd->q.type;
1038 
1039 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1040 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1041 	}
1042 
1043 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1044 				 user_context_converter, &cmd->q.cid,
1045 				 NULL);
1046 }
1047 
1048 /**
1049  * vmw_cmd_end_gb_query - Validate an SVGA_3D_CMD_END_GB_QUERY command.
1050  *
1051  * @dev_priv: Pointer to a device private struct.
1052  * @sw_context: The software context used for this command submission.
1053  * @header: Pointer to the command header in the command stream.
1054  */
1055 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1056 				struct vmw_sw_context *sw_context,
1057 				SVGA3dCmdHeader *header)
1058 {
1059 	struct vmw_dma_buffer *vmw_bo;
1060 	struct vmw_query_cmd {
1061 		SVGA3dCmdHeader header;
1062 		SVGA3dCmdEndGBQuery q;
1063 	} *cmd;
1064 	int ret;
1065 
1066 	cmd = container_of(header, struct vmw_query_cmd, header);
1067 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1068 	if (unlikely(ret != 0))
1069 		return ret;
1070 
1071 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1072 				    &cmd->q.mobid,
1073 				    &vmw_bo);
1074 	if (unlikely(ret != 0))
1075 		return ret;
1076 
1077 	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1078 
1079 	vmw_dmabuf_unreference(&vmw_bo);
1080 	return ret;
1081 }
1082 
1083 /**
1084  * vmw_cmd_end_query - Validate an SVGA_3D_CMD_END_QUERY command.
1085  *
1086  * @dev_priv: Pointer to a device private struct.
1087  * @sw_context: The software context used for this command submission.
1088  * @header: Pointer to the command header in the command stream.
1089  */
1090 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1091 			     struct vmw_sw_context *sw_context,
1092 			     SVGA3dCmdHeader *header)
1093 {
1094 	struct vmw_dma_buffer *vmw_bo;
1095 	struct vmw_query_cmd {
1096 		SVGA3dCmdHeader header;
1097 		SVGA3dCmdEndQuery q;
1098 	} *cmd;
1099 	int ret;
1100 
1101 	cmd = container_of(header, struct vmw_query_cmd, header);
1102 	if (dev_priv->has_mob) {
1103 		struct {
1104 			SVGA3dCmdHeader header;
1105 			SVGA3dCmdEndGBQuery q;
1106 		} gb_cmd;
1107 
1108 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1109 
1110 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1111 		gb_cmd.header.size = cmd->header.size;
1112 		gb_cmd.q.cid = cmd->q.cid;
1113 		gb_cmd.q.type = cmd->q.type;
1114 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1115 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1116 
1117 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1118 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1119 	}
1120 
1121 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1122 	if (unlikely(ret != 0))
1123 		return ret;
1124 
1125 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1126 				      &cmd->q.guestResult,
1127 				      &vmw_bo);
1128 	if (unlikely(ret != 0))
1129 		return ret;
1130 
1131 	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1132 
1133 	vmw_dmabuf_unreference(&vmw_bo);
1134 	return ret;
1135 }
1136 
1137 /**
1138  * vmw_cmd_wait_gb_query - Validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1139  *
1140  * @dev_priv: Pointer to a device private struct.
1141  * @sw_context: The software context used for this command submission.
1142  * @header: Pointer to the command header in the command stream.
1143  */
1144 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1145 				 struct vmw_sw_context *sw_context,
1146 				 SVGA3dCmdHeader *header)
1147 {
1148 	struct vmw_dma_buffer *vmw_bo;
1149 	struct vmw_query_cmd {
1150 		SVGA3dCmdHeader header;
1151 		SVGA3dCmdWaitForGBQuery q;
1152 	} *cmd;
1153 	int ret;
1154 
1155 	cmd = container_of(header, struct vmw_query_cmd, header);
1156 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1157 	if (unlikely(ret != 0))
1158 		return ret;
1159 
1160 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1161 				    &cmd->q.mobid,
1162 				    &vmw_bo);
1163 	if (unlikely(ret != 0))
1164 		return ret;
1165 
1166 	vmw_dmabuf_unreference(&vmw_bo);
1167 	return 0;
1168 }
1169 
1170 /**
1171  * vmw_cmd_wait_query - Validate an SVGA_3D_CMD_WAIT_QUERY command.
1172  *
1173  * @dev_priv: Pointer to a device private struct.
1174  * @sw_context: The software context used for this command submission.
1175  * @header: Pointer to the command header in the command stream.
1176  */
1177 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1178 			      struct vmw_sw_context *sw_context,
1179 			      SVGA3dCmdHeader *header)
1180 {
1181 	struct vmw_dma_buffer *vmw_bo;
1182 	struct vmw_query_cmd {
1183 		SVGA3dCmdHeader header;
1184 		SVGA3dCmdWaitForQuery q;
1185 	} *cmd;
1186 	int ret;
1187 
1188 	cmd = container_of(header, struct vmw_query_cmd, header);
1189 	if (dev_priv->has_mob) {
1190 		struct {
1191 			SVGA3dCmdHeader header;
1192 			SVGA3dCmdWaitForGBQuery q;
1193 		} gb_cmd;
1194 
1195 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1196 
1197 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1198 		gb_cmd.header.size = cmd->header.size;
1199 		gb_cmd.q.cid = cmd->q.cid;
1200 		gb_cmd.q.type = cmd->q.type;
1201 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1202 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1203 
1204 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1205 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1206 	}
1207 
1208 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1209 	if (unlikely(ret != 0))
1210 		return ret;
1211 
1212 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1213 				      &cmd->q.guestResult,
1214 				      &vmw_bo);
1215 	if (unlikely(ret != 0))
1216 		return ret;
1217 
1218 	vmw_dmabuf_unreference(&vmw_bo);
1219 	return 0;
1220 }
1221 
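/**
 * vmw_cmd_dma - Validate a surface DMA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Looks up the guest memory buffer and the host surface of the transfer,
 * verifies the DMA suffix, clamps the maximum offset to the buffer size and
 * lets the cursor snooping code inspect the command.
 */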
1222 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1223 		       struct vmw_sw_context *sw_context,
1224 		       SVGA3dCmdHeader *header)
1225 {
1226 	struct vmw_dma_buffer *vmw_bo = NULL;
1227 	struct vmw_surface *srf = NULL;
1228 	struct vmw_dma_cmd {
1229 		SVGA3dCmdHeader header;
1230 		SVGA3dCmdSurfaceDMA dma;
1231 	} *cmd;
1232 	int ret;
1233 	SVGA3dCmdSurfaceDMASuffix *suffix;
1234 	uint32_t bo_size;
1235 
1236 	cmd = container_of(header, struct vmw_dma_cmd, header);
1237 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1238 					       header->size - sizeof(*suffix));
1239 
1240 	/* Make sure device and verifier stay in sync. */
1241 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1242 		DRM_ERROR("Invalid DMA suffix size.\n");
1243 		return -EINVAL;
1244 	}
1245 
1246 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1247 				      &cmd->dma.guest.ptr,
1248 				      &vmw_bo);
1249 	if (unlikely(ret != 0))
1250 		return ret;
1251 
1252 	/* Make sure DMA doesn't cross BO boundaries. */
1253 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1254 	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1255 		DRM_ERROR("Invalid DMA offset.\n");
1256 		return -EINVAL;
1257 	}
1258 
1259 	bo_size -= cmd->dma.guest.ptr.offset;
1260 	if (unlikely(suffix->maximumOffset > bo_size))
1261 		suffix->maximumOffset = bo_size;
1262 
1263 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1264 				user_surface_converter, &cmd->dma.host.sid,
1265 				NULL);
1266 	if (unlikely(ret != 0)) {
1267 		if (unlikely(ret != -ERESTARTSYS))
1268 			DRM_ERROR("could not find surface for DMA.\n");
1269 		goto out_no_surface;
1270 	}
1271 
1272 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1273 
1274 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1275 			     header);
1276 
1277 out_no_surface:
1278 	vmw_dmabuf_unreference(&vmw_bo);
1279 	return ret;
1280 }
1281 
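/**
 * vmw_cmd_draw - Validate a draw primitives command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context as well as the surfaces referenced by all vertex
 * declarations and index ranges in the command.
 */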
1282 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1283 			struct vmw_sw_context *sw_context,
1284 			SVGA3dCmdHeader *header)
1285 {
1286 	struct vmw_draw_cmd {
1287 		SVGA3dCmdHeader header;
1288 		SVGA3dCmdDrawPrimitives body;
1289 	} *cmd;
1290 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1291 		(unsigned long)header + sizeof(*cmd));
1292 	SVGA3dPrimitiveRange *range;
1293 	uint32_t i;
1294 	uint32_t maxnum;
1295 	int ret;
1296 
1297 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1298 	if (unlikely(ret != 0))
1299 		return ret;
1300 
1301 	cmd = container_of(header, struct vmw_draw_cmd, header);
1302 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1303 
1304 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1305 		DRM_ERROR("Illegal number of vertex declarations.\n");
1306 		return -EINVAL;
1307 	}
1308 
1309 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1310 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1311 					user_surface_converter,
1312 					&decl->array.surfaceId, NULL);
1313 		if (unlikely(ret != 0))
1314 			return ret;
1315 	}
1316 
1317 	maxnum = (header->size - sizeof(cmd->body) -
1318 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1319 	if (unlikely(cmd->body.numRanges > maxnum)) {
1320 		DRM_ERROR("Illegal number of index ranges.\n");
1321 		return -EINVAL;
1322 	}
1323 
1324 	range = (SVGA3dPrimitiveRange *) decl;
1325 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1326 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1327 					user_surface_converter,
1328 					&range->indexArray.surfaceId, NULL);
1329 		if (unlikely(ret != 0))
1330 			return ret;
1331 	}
1332 	return 0;
1333 }
1334 
1335 
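/**
 * vmw_cmd_tex_state - Validate a set texture state command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context and any texture surfaces bound by the command and,
 * for guest-backed contexts, records the texture bindings.
 */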
1336 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1337 			     struct vmw_sw_context *sw_context,
1338 			     SVGA3dCmdHeader *header)
1339 {
1340 	struct vmw_tex_state_cmd {
1341 		SVGA3dCmdHeader header;
1342 		SVGA3dCmdSetTextureState state;
1343 	} *cmd;
1344 
1345 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1346 	  ((unsigned long) header + header->size + sizeof(header));
1347 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1348 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1349 	struct vmw_resource_val_node *ctx_node;
1350 	struct vmw_resource_val_node *res_node;
1351 	int ret;
1352 
1353 	cmd = container_of(header, struct vmw_tex_state_cmd,
1354 			   header);
1355 
1356 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1357 				user_context_converter, &cmd->state.cid,
1358 				&ctx_node);
1359 	if (unlikely(ret != 0))
1360 		return ret;
1361 
1362 	for (; cur_state < last_state; ++cur_state) {
1363 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1364 			continue;
1365 
1366 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1367 					user_surface_converter,
1368 					&cur_state->value, &res_node);
1369 		if (unlikely(ret != 0))
1370 			return ret;
1371 
1372 		if (dev_priv->has_mob) {
1373 			struct vmw_ctx_bindinfo bi;
1374 
1375 			bi.ctx = ctx_node->res;
1376 			bi.res = res_node ? res_node->res : NULL;
1377 			bi.bt = vmw_ctx_binding_tex;
1378 			bi.i1.texture_stage = cur_state->stage;
1379 			vmw_context_binding_add(ctx_node->staged_bindings,
1380 						&bi);
1381 		}
1382 	}
1383 
1384 	return 0;
1385 }
1386 
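/**
 * vmw_cmd_check_define_gmrfb - Validate a define GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 *
 * Checks the guest memory region backing the GMRFB by adding it to the
 * relocation and validation lists.
 */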
1387 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1388 				      struct vmw_sw_context *sw_context,
1389 				      void *buf)
1390 {
1391 	struct vmw_dma_buffer *vmw_bo;
1392 	int ret;
1393 
1394 	struct {
1395 		uint32_t header;
1396 		SVGAFifoCmdDefineGMRFB body;
1397 	} *cmd = buf;
1398 
1399 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1400 				      &cmd->body.ptr,
1401 				      &vmw_bo);
1402 	if (unlikely(ret != 0))
1403 		return ret;
1404 
1405 	vmw_dmabuf_unreference(&vmw_bo);
1406 
1407 	return ret;
1408 }
1409 
1410 /**
1411  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1412  *
1413  * @dev_priv: Pointer to a device private struct.
1414  * @sw_context: The software context being used for this batch.
1415  * @res_type: The resource type.
1416  * @converter: Information about user-space binding for this resource type.
1417  * @res_id: Pointer to the user-space resource handle in the command stream.
1418  * @buf_id: Pointer to the user-space backup buffer handle in the command
1419  * stream.
1420  * @backup_offset: Offset of backup into MOB.
1421  *
1422  * This function prepares for registering a switch of backup buffers
1423  * in the resource metadata just prior to unreserving.
1424  */
1425 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1426 				 struct vmw_sw_context *sw_context,
1427 				 enum vmw_res_type res_type,
1428 				 const struct vmw_user_resource_conv
1429 				 *converter,
1430 				 uint32_t *res_id,
1431 				 uint32_t *buf_id,
1432 				 unsigned long backup_offset)
1433 {
1434 	int ret;
1435 	struct vmw_dma_buffer *dma_buf;
1436 	struct vmw_resource_val_node *val_node;
1437 
1438 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1439 				converter, res_id, &val_node);
1440 	if (unlikely(ret != 0))
1441 		return ret;
1442 
1443 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1444 	if (unlikely(ret != 0))
1445 		return ret;
1446 
1447 	if (val_node->first_usage)
1448 		val_node->no_buffer_needed = true;
1449 
1450 	vmw_dmabuf_unreference(&val_node->new_backup);
1451 	val_node->new_backup = dma_buf;
1452 	val_node->new_backup_offset = backup_offset;
1453 
1454 	return 0;
1455 }
1456 
1457 /**
1458  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1459  * command
1460  *
1461  * @dev_priv: Pointer to a device private struct.
1462  * @sw_context: The software context being used for this batch.
1463  * @header: Pointer to the command header in the command stream.
1464  */
1465 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1466 				   struct vmw_sw_context *sw_context,
1467 				   SVGA3dCmdHeader *header)
1468 {
1469 	struct vmw_bind_gb_surface_cmd {
1470 		SVGA3dCmdHeader header;
1471 		SVGA3dCmdBindGBSurface body;
1472 	} *cmd;
1473 
1474 	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1475 
1476 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1477 				     user_surface_converter,
1478 				     &cmd->body.sid, &cmd->body.mobid,
1479 				     0);
1480 }
1481 
1482 /**
1483  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1484  * command
1485  *
1486  * @dev_priv: Pointer to a device private struct.
1487  * @sw_context: The software context being used for this batch.
1488  * @header: Pointer to the command header in the command stream.
1489  */
1490 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1491 				   struct vmw_sw_context *sw_context,
1492 				   SVGA3dCmdHeader *header)
1493 {
1494 	struct vmw_gb_surface_cmd {
1495 		SVGA3dCmdHeader header;
1496 		SVGA3dCmdUpdateGBImage body;
1497 	} *cmd;
1498 
1499 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1500 
1501 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1502 				 user_surface_converter,
1503 				 &cmd->body.image.sid, NULL);
1504 }
1505 
1506 /**
1507  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1508  * command
1509  *
1510  * @dev_priv: Pointer to a device private struct.
1511  * @sw_context: The software context being used for this batch.
1512  * @header: Pointer to the command header in the command stream.
1513  */
1514 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1515 				     struct vmw_sw_context *sw_context,
1516 				     SVGA3dCmdHeader *header)
1517 {
1518 	struct vmw_gb_surface_cmd {
1519 		SVGA3dCmdHeader header;
1520 		SVGA3dCmdUpdateGBSurface body;
1521 	} *cmd;
1522 
1523 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1524 
1525 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1526 				 user_surface_converter,
1527 				 &cmd->body.sid, NULL);
1528 }
1529 
1530 /**
1531  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1532  * command
1533  *
1534  * @dev_priv: Pointer to a device private struct.
1535  * @sw_context: The software context being used for this batch.
1536  * @header: Pointer to the command header in the command stream.
1537  */
1538 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1539 				     struct vmw_sw_context *sw_context,
1540 				     SVGA3dCmdHeader *header)
1541 {
1542 	struct vmw_gb_surface_cmd {
1543 		SVGA3dCmdHeader header;
1544 		SVGA3dCmdReadbackGBImage body;
1545 	} *cmd;
1546 
1547 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1548 
1549 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1550 				 user_surface_converter,
1551 				 &cmd->body.image.sid, NULL);
1552 }
1553 
1554 /**
1555  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1556  * command
1557  *
1558  * @dev_priv: Pointer to a device private struct.
1559  * @sw_context: The software context being used for this batch.
1560  * @header: Pointer to the command header in the command stream.
1561  */
1562 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1563 				       struct vmw_sw_context *sw_context,
1564 				       SVGA3dCmdHeader *header)
1565 {
1566 	struct vmw_gb_surface_cmd {
1567 		SVGA3dCmdHeader header;
1568 		SVGA3dCmdReadbackGBSurface body;
1569 	} *cmd;
1570 
1571 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1572 
1573 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1574 				 user_surface_converter,
1575 				 &cmd->body.sid, NULL);
1576 }
1577 
1578 /**
1579  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1580  * command
1581  *
1582  * @dev_priv: Pointer to a device private struct.
1583  * @sw_context: The software context being used for this batch.
1584  * @header: Pointer to the command header in the command stream.
1585  */
1586 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1587 				       struct vmw_sw_context *sw_context,
1588 				       SVGA3dCmdHeader *header)
1589 {
1590 	struct vmw_gb_surface_cmd {
1591 		SVGA3dCmdHeader header;
1592 		SVGA3dCmdInvalidateGBImage body;
1593 	} *cmd;
1594 
1595 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1596 
1597 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1598 				 user_surface_converter,
1599 				 &cmd->body.image.sid, NULL);
1600 }
1601 
1602 /**
1603  * vmw_cmd_invalidate_gb_surface - Validate an
1604  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1605  *
1606  * @dev_priv: Pointer to a device private struct.
1607  * @sw_context: The software context being used for this batch.
1608  * @header: Pointer to the command header in the command stream.
1609  */
1610 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1611 					 struct vmw_sw_context *sw_context,
1612 					 SVGA3dCmdHeader *header)
1613 {
1614 	struct vmw_gb_surface_cmd {
1615 		SVGA3dCmdHeader header;
1616 		SVGA3dCmdInvalidateGBSurface body;
1617 	} *cmd;
1618 
1619 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1620 
1621 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1622 				 user_surface_converter,
1623 				 &cmd->body.sid, NULL);
1624 }
1625 
1626 
1627 /**
1628  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1629  * command
1630  *
1631  * @dev_priv: Pointer to a device private struct.
1632  * @sw_context: The software context being used for this batch.
1633  * @header: Pointer to the command header in the command stream.
1634  */
1635 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1636 				 struct vmw_sw_context *sw_context,
1637 				 SVGA3dCmdHeader *header)
1638 {
1639 	struct vmw_shader_define_cmd {
1640 		SVGA3dCmdHeader header;
1641 		SVGA3dCmdDefineShader body;
1642 	} *cmd;
1643 	int ret;
1644 	size_t size;
1645 	struct vmw_resource_val_node *val;
1646 
1647 	cmd = container_of(header, struct vmw_shader_define_cmd,
1648 			   header);
1649 
1650 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1651 				user_context_converter, &cmd->body.cid,
1652 				&val);
1653 	if (unlikely(ret != 0))
1654 		return ret;
1655 
1656 	if (unlikely(!dev_priv->has_mob))
1657 		return 0;
1658 
1659 	size = cmd->header.size - sizeof(cmd->body);
1660 	ret = vmw_compat_shader_add(dev_priv,
1661 				    vmw_context_res_man(val->res),
1662 				    cmd->body.shid, cmd + 1,
1663 				    cmd->body.type, size,
1664 				    &sw_context->staged_cmd_res);
1665 	if (unlikely(ret != 0))
1666 		return ret;
1667 
1668 	return vmw_resource_relocation_add(&sw_context->res_relocations,
1669 					   NULL, &cmd->header.id -
1670 					   sw_context->buf_start);
1673 }
1674 
1675 /**
1676  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1677  * command
1678  *
1679  * @dev_priv: Pointer to a device private struct.
1680  * @sw_context: The software context being used for this batch.
1681  * @header: Pointer to the command header in the command stream.
1682  */
1683 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1684 				  struct vmw_sw_context *sw_context,
1685 				  SVGA3dCmdHeader *header)
1686 {
1687 	struct vmw_shader_destroy_cmd {
1688 		SVGA3dCmdHeader header;
1689 		SVGA3dCmdDestroyShader body;
1690 	} *cmd;
1691 	int ret;
1692 	struct vmw_resource_val_node *val;
1693 
1694 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
1695 			   header);
1696 
1697 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1698 				user_context_converter, &cmd->body.cid,
1699 				&val);
1700 	if (unlikely(ret != 0))
1701 		return ret;
1702 
1703 	if (unlikely(!dev_priv->has_mob))
1704 		return 0;
1705 
1706 	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1707 				       cmd->body.shid,
1708 				       cmd->body.type,
1709 				       &sw_context->staged_cmd_res);
1710 	if (unlikely(ret != 0))
1711 		return ret;
1712 
1713 	return vmw_resource_relocation_add(&sw_context->res_relocations,
1714 					   NULL, &cmd->header.id -
1715 					   sw_context->buf_start);
1718 }
1719 
1720 /**
1721  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1722  * command
1723  *
1724  * @dev_priv: Pointer to a device private struct.
1725  * @sw_context: The software context being used for this batch.
1726  * @header: Pointer to the command header in the command stream.
1727  */
1728 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1729 			      struct vmw_sw_context *sw_context,
1730 			      SVGA3dCmdHeader *header)
1731 {
1732 	struct vmw_set_shader_cmd {
1733 		SVGA3dCmdHeader header;
1734 		SVGA3dCmdSetShader body;
1735 	} *cmd;
1736 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1737 	struct vmw_ctx_bindinfo bi;
1738 	struct vmw_resource *res = NULL;
1739 	int ret;
1740 
1741 	cmd = container_of(header, struct vmw_set_shader_cmd,
1742 			   header);
1743 
1744 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1745 				user_context_converter, &cmd->body.cid,
1746 				&ctx_node);
1747 	if (unlikely(ret != 0))
1748 		return ret;
1749 
1750 	if (!dev_priv->has_mob)
1751 		return 0;
1752 
1753 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
1754 		res = vmw_compat_shader_lookup
1755 			(vmw_context_res_man(ctx_node->res),
1756 			 cmd->body.shid,
1757 			 cmd->body.type);
1758 
1759 		if (!IS_ERR(res)) {
1760 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1761 						    vmw_res_shader,
1762 						    &cmd->body.shid, res,
1763 						    &res_node);
1764 			vmw_resource_unreference(&res);
1765 			if (unlikely(ret != 0))
1766 				return ret;
1767 		}
1768 	}
1769 
1770 	if (!res_node) {
1771 		ret = vmw_cmd_res_check(dev_priv, sw_context,
1772 					vmw_res_shader,
1773 					user_shader_converter,
1774 					&cmd->body.shid, &res_node);
1775 		if (unlikely(ret != 0))
1776 			return ret;
1777 	}
1778 
1779 	bi.ctx = ctx_node->res;
1780 	bi.res = res_node ? res_node->res : NULL;
1781 	bi.bt = vmw_ctx_binding_shader;
1782 	bi.i1.shader_type = cmd->body.type;
1783 	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1784 }
1785 
1786 /**
1787  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1788  * command
1789  *
1790  * @dev_priv: Pointer to a device private struct.
1791  * @sw_context: The software context being used for this batch.
1792  * @header: Pointer to the command header in the command stream.
1793  */
1794 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1795 				    struct vmw_sw_context *sw_context,
1796 				    SVGA3dCmdHeader *header)
1797 {
1798 	struct vmw_set_shader_const_cmd {
1799 		SVGA3dCmdHeader header;
1800 		SVGA3dCmdSetShaderConst body;
1801 	} *cmd;
1802 	int ret;
1803 
1804 	cmd = container_of(header, struct vmw_set_shader_const_cmd,
1805 			   header);
1806 
1807 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1808 				user_context_converter, &cmd->body.cid,
1809 				NULL);
1810 	if (unlikely(ret != 0))
1811 		return ret;
1812 
1813 	if (dev_priv->has_mob)
1814 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1815 
1816 	return 0;
1817 }
1818 
1819 /**
1820  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1821  * command
1822  *
1823  * @dev_priv: Pointer to a device private struct.
1824  * @sw_context: The software context being used for this batch.
1825  * @header: Pointer to the command header in the command stream.
1826  */
1827 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1828 				  struct vmw_sw_context *sw_context,
1829 				  SVGA3dCmdHeader *header)
1830 {
1831 	struct vmw_bind_gb_shader_cmd {
1832 		SVGA3dCmdHeader header;
1833 		SVGA3dCmdBindGBShader body;
1834 	} *cmd;
1835 
1836 	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1837 			   header);
1838 
1839 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1840 				     user_shader_converter,
1841 				     &cmd->body.shid, &cmd->body.mobid,
1842 				     cmd->body.offsetInBytes);
1843 }
1844 
1845 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1846 				struct vmw_sw_context *sw_context,
1847 				void *buf, uint32_t *size)
1848 {
1849 	uint32_t size_remaining = *size;
1850 	uint32_t cmd_id;
1851 
1852 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1853 	switch (cmd_id) {
1854 	case SVGA_CMD_UPDATE:
1855 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1856 		break;
1857 	case SVGA_CMD_DEFINE_GMRFB:
1858 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1859 		break;
1860 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1861 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1862 		break;
1863 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1864 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1865 		break;
1866 	default:
1867 		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1868 		return -EINVAL;
1869 	}
1870 
1871 	if (*size > size_remaining) {
1872 		DRM_ERROR("Invalid SVGA command (size mismatch):"
1873 			  " %u.\n", cmd_id);
1874 		return -EINVAL;
1875 	}
1876 
1877 	if (unlikely(!sw_context->kernel)) {
1878 		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1879 		return -EPERM;
1880 	}
1881 
1882 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1883 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1884 
1885 	return 0;
1886 }
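
/*
 * Editor's note: illustrative sketch only, not part of this file. A non-3D
 * SVGA FIFO command is a 32-bit command id immediately followed by its
 * fixed-size payload, which is why the verifier above computes *size as
 * sizeof(uint32_t) + sizeof(payload). The helper name emit_svga_update()
 * is hypothetical.
 */
static inline void emit_svga_update(void *buf, const SVGAFifoCmdUpdate *body)
{
	uint32_t *cmd = buf;

	cmd[0] = SVGA_CMD_UPDATE;		/* 32-bit command id */
	memcpy(&cmd[1], body, sizeof(*body));	/* payload follows directly */
}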
1887 
1888 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1889 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1890 		    false, false, false),
1891 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1892 		    false, false, false),
1893 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1894 		    true, false, false),
1895 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1896 		    true, false, false),
1897 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1898 		    true, false, false),
1899 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1900 		    false, false, false),
1901 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1902 		    false, false, false),
1903 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1904 		    true, false, false),
1905 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1906 		    true, false, false),
1907 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1908 		    true, false, false),
1909 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1910 		    &vmw_cmd_set_render_target_check, true, false, false),
1911 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1912 		    true, false, false),
1913 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1914 		    true, false, false),
1915 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1916 		    true, false, false),
1917 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1918 		    true, false, false),
1919 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1920 		    true, false, false),
1921 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1922 		    true, false, false),
1923 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1924 		    true, false, false),
1925 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1926 		    false, false, false),
1927 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1928 		    true, false, false),
1929 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1930 		    true, false, false),
1931 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1932 		    true, false, false),
1933 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1934 		    true, false, false),
1935 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1936 		    true, false, false),
1937 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1938 		    true, false, false),
1939 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1940 		    true, false, false),
1941 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1942 		    true, false, false),
1943 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1944 		    true, false, false),
1945 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1946 		    true, false, false),
1947 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1948 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
1949 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1950 		    false, false, false),
1951 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1952 		    false, false, false),
1953 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1954 		    false, false, false),
1955 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1956 		    false, false, false),
1957 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1958 		    false, false, false),
1959 	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1960 		    false, false, false),
1961 	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1962 		    false, false, false),
1963 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1964 		    false, false, false),
1965 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1966 		    false, false, false),
1967 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1968 		    false, false, false),
1969 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1970 		    false, false, false),
1971 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1972 		    false, false, false),
1973 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1974 		    false, false, false),
1975 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1976 		    false, false, true),
1977 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1978 		    false, false, true),
1979 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1980 		    false, false, true),
1981 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1982 		    false, false, true),
1983 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1984 		    false, false, true),
1985 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1986 		    false, false, true),
1987 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1988 		    false, false, true),
1989 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1990 		    false, false, true),
1991 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1992 		    true, false, true),
1993 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1994 		    false, false, true),
1995 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1996 		    true, false, true),
1997 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1998 		    &vmw_cmd_update_gb_surface, true, false, true),
1999 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2000 		    &vmw_cmd_readback_gb_image, true, false, true),
2001 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2002 		    &vmw_cmd_readback_gb_surface, true, false, true),
2003 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2004 		    &vmw_cmd_invalidate_gb_image, true, false, true),
2005 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2006 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
2007 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2008 		    false, false, true),
2009 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2010 		    false, false, true),
2011 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2012 		    false, false, true),
2013 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2014 		    false, false, true),
2015 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2016 		    false, false, true),
2017 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2018 		    false, false, true),
2019 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2020 		    true, false, true),
2021 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2022 		    false, false, true),
2023 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2024 		    false, false, false),
2025 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2026 		    true, false, true),
2027 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2028 		    true, false, true),
2029 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2030 		    true, false, true),
2031 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2032 		    true, false, true),
2033 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2034 		    false, false, true),
2035 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2036 		    false, false, true),
2037 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2038 		    false, false, true),
2039 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2040 		    false, false, true),
2041 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2042 		    false, false, true),
2043 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2044 		    false, false, true),
2045 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2046 		    false, false, true),
2047 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2048 		    false, false, true),
2049 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2050 		    false, false, true),
2051 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2052 		    false, false, true),
2053 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2054 		    true, false, true)
2055 };
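
/*
 * Editor's note: hypothetical helper, not part of this file. The table above
 * is indexed by the 3D command id relative to SVGA_3D_CMD_BASE, so the range
 * check done in vmw_cmd_check() below is equivalent to:
 */
static inline bool vmw_cmd_id_is_3d(uint32_t cmd_id)
{
	return cmd_id >= SVGA_3D_CMD_BASE && cmd_id < SVGA_3D_CMD_MAX;
}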
2056 
2057 static int vmw_cmd_check(struct vmw_private *dev_priv,
2058 			 struct vmw_sw_context *sw_context,
2059 			 void *buf, uint32_t *size)
2060 {
2061 	uint32_t cmd_id;
2062 	uint32_t size_remaining = *size;
2063 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2064 	int ret;
2065 	const struct vmw_cmd_entry *entry;
2066 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2067 
2068 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2069 	/* Handle any non-3D commands. */
2070 	if (unlikely(cmd_id < SVGA_CMD_MAX))
2071 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2072 
2073 
2074 	cmd_id = le32_to_cpu(header->id);
2075 	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2076 
2077 	cmd_id -= SVGA_3D_CMD_BASE;
2078 	if (unlikely(*size > size_remaining))
2079 		goto out_invalid;
2080 
2081 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2082 		goto out_invalid;
2083 
2084 	entry = &vmw_cmd_entries[cmd_id];
2085 	if (unlikely(!entry->func))
2086 		goto out_invalid;
2087 
2088 	if (unlikely(!entry->user_allow && !sw_context->kernel))
2089 		goto out_privileged;
2090 
2091 	if (unlikely(entry->gb_disable && gb))
2092 		goto out_old;
2093 
2094 	if (unlikely(entry->gb_enable && !gb))
2095 		goto out_new;
2096 
2097 	ret = entry->func(dev_priv, sw_context, header);
2098 	if (unlikely(ret != 0))
2099 		goto out_invalid;
2100 
2101 	return 0;
2102 out_invalid:
2103 	DRM_ERROR("Invalid SVGA3D command: %d\n",
2104 		  cmd_id + SVGA_3D_CMD_BASE);
2105 	return -EINVAL;
2106 out_privileged:
2107 	DRM_ERROR("Privileged SVGA3D command: %d\n",
2108 		  cmd_id + SVGA_3D_CMD_BASE);
2109 	return -EPERM;
2110 out_old:
2111 	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2112 		  cmd_id + SVGA_3D_CMD_BASE);
2113 	return -EINVAL;
2114 out_new:
2115 	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2116 		  cmd_id + SVGA_3D_CMD_BASE);
2117 	return -EINVAL;
2118 }
2119 
2120 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2121 			     struct vmw_sw_context *sw_context,
2122 			     void *buf,
2123 			     uint32_t size)
2124 {
2125 	int32_t cur_size = size;
2126 	int ret;
2127 
2128 	sw_context->buf_start = buf;
2129 
2130 	while (cur_size > 0) {
2131 		size = cur_size;
2132 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2133 		if (unlikely(ret != 0))
2134 			return ret;
2135 		buf = (void *)((unsigned long) buf + size);
2136 		cur_size -= size;
2137 	}
2138 
2139 	if (unlikely(cur_size != 0)) {
2140 		DRM_ERROR("Command verifier out of sync.\n");
2141 		return -EINVAL;
2142 	}
2143 
2144 	return 0;
2145 }
2146 
2147 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2148 {
2149 	sw_context->cur_reloc = 0;
2150 }
2151 
2152 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2153 {
2154 	uint32_t i;
2155 	struct vmw_relocation *reloc;
2156 	struct ttm_validate_buffer *validate;
2157 	struct ttm_buffer_object *bo;
2158 
2159 	for (i = 0; i < sw_context->cur_reloc; ++i) {
2160 		reloc = &sw_context->relocs[i];
2161 		validate = &sw_context->val_bufs[reloc->index].base;
2162 		bo = validate->bo;
2163 		switch (bo->mem.mem_type) {
2164 		case TTM_PL_VRAM:
2165 			reloc->location->offset += bo->offset;
2166 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2167 			break;
2168 		case VMW_PL_GMR:
2169 			reloc->location->gmrId = bo->mem.start;
2170 			break;
2171 		case VMW_PL_MOB:
2172 			*reloc->mob_loc = bo->mem.start;
2173 			break;
2174 		default:
2175 			BUG();
2176 		}
2177 	}
2178 	vmw_free_relocations(sw_context);
2179 }
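
/*
 * Editor's note: illustrative only, not part of this file. Each relocation
 * records where a guest pointer (or a MOB id) lives inside the copied command
 * stream. Once the buffer's final placement is known, that location is
 * patched as in the switch above, e.g. for a buffer validated into a GMR:
 *
 *	reloc->location->gmrId = bo->mem.start;  // GMR id chosen at validation
 *	// reloc->location->offset is already relative to the GMR start
 */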
2180 
2181 /**
2182  * vmw_resource_list_unreference - Free up a resource list and unreference
2183  * all resources referenced by it.
2184  *
2185  * @list: The resource list.
2186  */
2187 static void vmw_resource_list_unreference(struct list_head *list)
2188 {
2189 	struct vmw_resource_val_node *val, *val_next;
2190 
2191 	/*
2192 	 * Drop references to resources held during command submission.
2193 	 */
2194 
2195 	list_for_each_entry_safe(val, val_next, list, head) {
2196 		list_del_init(&val->head);
2197 		vmw_resource_unreference(&val->res);
2198 		if (unlikely(val->staged_bindings))
2199 			kfree(val->staged_bindings);
2200 		kfree(val);
2201 	}
2202 }
2203 
2204 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2205 {
2206 	struct vmw_validate_buffer *entry, *next;
2207 	struct vmw_resource_val_node *val;
2208 
2209 	/*
2210 	 * Drop references to DMA buffers held during command submission.
2211 	 */
2212 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2213 				 base.head) {
2214 		list_del(&entry->base.head);
2215 		ttm_bo_unref(&entry->base.bo);
2216 		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2217 		sw_context->cur_val_buf--;
2218 	}
2219 	BUG_ON(sw_context->cur_val_buf != 0);
2220 
2221 	list_for_each_entry(val, &sw_context->resource_list, head)
2222 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2223 }
2224 
2225 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2226 				      struct ttm_buffer_object *bo,
2227 				      bool validate_as_mob)
2228 {
2229 	int ret;
2230 
2231 
2232 	/*
2233 	 * Don't validate pinned buffers.
2234 	 */
2235 
2236 	if (bo == dev_priv->pinned_bo ||
2237 	    (bo == dev_priv->dummy_query_bo &&
2238 	     dev_priv->dummy_query_bo_pinned))
2239 		return 0;
2240 
2241 	if (validate_as_mob)
2242 		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2243 
2244 	/*
2245 	 * Put BO in VRAM if there is space, otherwise as a GMR.
2246 	 * If there is no space in VRAM and GMR ids are all used up,
2247 	 * start evicting GMRs to make room. If the DMA buffer can't be
2248 	 * used as a GMR, this will return -ENOMEM.
2249 	 */
2250 
2251 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2252 	if (likely(ret == 0 || ret == -ERESTARTSYS))
2253 		return ret;
2254 
2255 	/*
2256 	 * If that failed, try VRAM again, this time evicting
2257 	 * previous contents.
2258 	 */
2259 
2260 	DRM_INFO("Falling through to VRAM.\n");
2261 	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2262 	return ret;
2263 }
2264 
2265 static int vmw_validate_buffers(struct vmw_private *dev_priv,
2266 				struct vmw_sw_context *sw_context)
2267 {
2268 	struct vmw_validate_buffer *entry;
2269 	int ret;
2270 
2271 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2272 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2273 						 entry->validate_as_mob);
2274 		if (unlikely(ret != 0))
2275 			return ret;
2276 	}
2277 	return 0;
2278 }
2279 
2280 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2281 				 uint32_t size)
2282 {
2283 	if (likely(sw_context->cmd_bounce_size >= size))
2284 		return 0;
2285 
2286 	if (sw_context->cmd_bounce_size == 0)
2287 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2288 
2289 	while (sw_context->cmd_bounce_size < size) {
2290 		sw_context->cmd_bounce_size =
2291 			PAGE_ALIGN(sw_context->cmd_bounce_size +
2292 				   (sw_context->cmd_bounce_size >> 1));
2293 	}
2294 
2295 	if (sw_context->cmd_bounce != NULL)
2296 		vfree(sw_context->cmd_bounce);
2297 
2298 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2299 
2300 	if (sw_context->cmd_bounce == NULL) {
2301 		DRM_ERROR("Failed to allocate command bounce buffer.\n");
2302 		sw_context->cmd_bounce_size = 0;
2303 		return -ENOMEM;
2304 	}
2305 
2306 	return 0;
2307 }
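
/*
 * Editor's note: hypothetical helper, not part of this file. It restates the
 * growth policy of the loop above: the bounce buffer grows by roughly 50%
 * per step, rounded up to a page, so submissions of similar size reuse the
 * same allocation.
 */
static inline uint32_t vmw_cmd_bounce_grow_once(uint32_t cur_size)
{
	return PAGE_ALIGN(cur_size + (cur_size >> 1));
}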
2308 
2309 /**
2310  * vmw_execbuf_fence_commands - create and submit a command stream fence
2311  *
2312  * Creates a fence object and submits a command stream marker.
2313  * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
2314  * It is then safe to fence buffers with a NULL pointer.
2315  *
2316  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
2317  * user-space fence handle is created and returned through @p_handle.
2318  */
2319 
2320 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2321 			       struct vmw_private *dev_priv,
2322 			       struct vmw_fence_obj **p_fence,
2323 			       uint32_t *p_handle)
2324 {
2325 	uint32_t sequence;
2326 	int ret;
2327 	bool synced = false;
2328 
2329 	/* p_handle implies file_priv. */
2330 	BUG_ON(p_handle != NULL && file_priv == NULL);
2331 
2332 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
2333 	if (unlikely(ret != 0)) {
2334 		DRM_ERROR("Fence submission error. Syncing.\n");
2335 		synced = true;
2336 	}
2337 
2338 	if (p_handle != NULL)
2339 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2340 					    sequence,
2341 					    DRM_VMW_FENCE_FLAG_EXEC,
2342 					    p_fence, p_handle);
2343 	else
2344 		ret = vmw_fence_create(dev_priv->fman, sequence,
2345 				       DRM_VMW_FENCE_FLAG_EXEC,
2346 				       p_fence);
2347 
2348 	if (unlikely(ret != 0 && !synced)) {
2349 		(void) vmw_fallback_wait(dev_priv, false, false,
2350 					 sequence, false,
2351 					 VMW_FENCE_WAIT_TIMEOUT);
2352 		*p_fence = NULL;
2353 	}
2354 
2355 	return 0;
2356 }
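
/*
 * Editor's note: usage illustration only; both call patterns are taken from
 * later in this file. The execbuf ioctl path passes a handle pointer so that
 * user-space can reference the fence, while kernel-internal callers pass
 * NULL:
 *
 *	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
 *					 (user_fence_rep) ? &handle : NULL);
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, NULL);
 */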
2357 
2358 /**
2359  * vmw_execbuf_copy_fence_user - copy fence object information to
2360  * user-space.
2361  *
2362  * @dev_priv: Pointer to a vmw_private struct.
2363  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2364  * @ret: Return value from fence object creation.
2365  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2366  * which the information should be copied.
2367  * @fence: Pointer to the fence object.
2368  * @fence_handle: User-space fence handle.
2369  *
2370  * This function copies fence information to user-space. If copying fails,
2371  * the user-space struct drm_vmw_fence_rep::error member should be left
2372  * untouched, so an -EFAULT value preloaded there by user-space allows the
2373  * failure to be detected.
2374  * Also, if copying fails, user-space will be unable to signal the fence
2375  * object, so we wait for it immediately and then drop the user-space
2376  * reference.
2377  */
2378 void
2379 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2380 			    struct vmw_fpriv *vmw_fp,
2381 			    int ret,
2382 			    struct drm_vmw_fence_rep __user *user_fence_rep,
2383 			    struct vmw_fence_obj *fence,
2384 			    uint32_t fence_handle)
2385 {
2386 	struct drm_vmw_fence_rep fence_rep;
2387 
2388 	if (user_fence_rep == NULL)
2389 		return;
2390 
2391 	memset(&fence_rep, 0, sizeof(fence_rep));
2392 
2393 	fence_rep.error = ret;
2394 	if (ret == 0) {
2395 		BUG_ON(fence == NULL);
2396 
2397 		fence_rep.handle = fence_handle;
2398 		fence_rep.seqno = fence->seqno;
2399 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
2400 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
2401 	}
2402 
2403 	/*
2404 	 * copy_to_user errors will be detected by user space not
2405 	 * seeing fence_rep::error filled in. Typically
2406 	 * user-space would have pre-set that member to -EFAULT.
2407 	 */
2408 	ret = copy_to_user(user_fence_rep, &fence_rep,
2409 			   sizeof(fence_rep));
2410 
2411 	/*
2412 	 * User-space lost the fence object. We need to sync
2413 	 * and unreference the handle.
2414 	 */
2415 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2416 		ttm_ref_object_base_unref(vmw_fp->tfile,
2417 					  fence_handle, TTM_REF_USAGE);
2418 		DRM_ERROR("Fence copy error. Syncing.\n");
2419 		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
2420 					  false, false,
2421 					  VMW_FENCE_WAIT_TIMEOUT);
2422 	}
2423 }
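
/*
 * Editor's note: hypothetical user-space snippet, not part of this file,
 * illustrating the convention described above. The caller preloads
 * fence_rep.error with -EFAULT so that a failed copy_to_user() in the kernel
 * is still detectable after the ioctl returns:
 *
 *	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (fence_rep.error != 0)
 *		handle_missing_fence();	// hypothetical error path
 */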
2424 
2425 
2426 
2427 int vmw_execbuf_process(struct drm_file *file_priv,
2428 			struct vmw_private *dev_priv,
2429 			void __user *user_commands,
2430 			void *kernel_commands,
2431 			uint32_t command_size,
2432 			uint64_t throttle_us,
2433 			struct drm_vmw_fence_rep __user *user_fence_rep,
2434 			struct vmw_fence_obj **out_fence)
2435 {
2436 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
2437 	struct vmw_fence_obj *fence = NULL;
2438 	struct vmw_resource *error_resource;
2439 	struct list_head resource_list;
2440 	struct ww_acquire_ctx ticket;
2441 	uint32_t handle;
2442 	void *cmd;
2443 	int ret;
2444 
2445 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2446 	if (unlikely(ret != 0))
2447 		return -ERESTARTSYS;
2448 
2449 	if (kernel_commands == NULL) {
2450 		sw_context->kernel = false;
2451 
2452 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
2453 		if (unlikely(ret != 0))
2454 			goto out_unlock;
2455 
2456 
2457 		ret = copy_from_user(sw_context->cmd_bounce,
2458 				     user_commands, command_size);
2459 
2460 		if (unlikely(ret != 0)) {
2461 			ret = -EFAULT;
2462 			DRM_ERROR("Failed copying commands.\n");
2463 			goto out_unlock;
2464 		}
2465 		kernel_commands = sw_context->cmd_bounce;
2466 	} else
2467 		sw_context->kernel = true;
2468 
2469 	sw_context->fp = vmw_fpriv(file_priv);
2470 	sw_context->cur_reloc = 0;
2471 	sw_context->cur_val_buf = 0;
2472 	sw_context->fence_flags = 0;
2473 	INIT_LIST_HEAD(&sw_context->resource_list);
2474 	sw_context->cur_query_bo = dev_priv->pinned_bo;
2475 	sw_context->last_query_ctx = NULL;
2476 	sw_context->needs_post_query_barrier = false;
2477 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2478 	INIT_LIST_HEAD(&sw_context->validate_nodes);
2479 	INIT_LIST_HEAD(&sw_context->res_relocations);
2480 	if (!sw_context->res_ht_initialized) {
2481 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2482 		if (unlikely(ret != 0))
2483 			goto out_unlock;
2484 		sw_context->res_ht_initialized = true;
2485 	}
2486 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2487 
2488 	INIT_LIST_HEAD(&resource_list);
2489 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2490 				command_size);
2491 	if (unlikely(ret != 0))
2492 		goto out_err_nores;
2493 
2494 	ret = vmw_resources_reserve(sw_context);
2495 	if (unlikely(ret != 0))
2496 		goto out_err_nores;
2497 
2498 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2499 	if (unlikely(ret != 0))
2500 		goto out_err;
2501 
2502 	ret = vmw_validate_buffers(dev_priv, sw_context);
2503 	if (unlikely(ret != 0))
2504 		goto out_err;
2505 
2506 	ret = vmw_resources_validate(sw_context);
2507 	if (unlikely(ret != 0))
2508 		goto out_err;
2509 
2510 	if (throttle_us) {
2511 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2512 				   throttle_us);
2513 
2514 		if (unlikely(ret != 0))
2515 			goto out_err;
2516 	}
2517 
2518 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2519 	if (unlikely(ret != 0)) {
2520 		ret = -ERESTARTSYS;
2521 		goto out_err;
2522 	}
2523 
2524 	if (dev_priv->has_mob) {
2525 		ret = vmw_rebind_contexts(sw_context);
2526 		if (unlikely(ret != 0))
2527 			goto out_unlock_binding;
2528 	}
2529 
2530 	cmd = vmw_fifo_reserve(dev_priv, command_size);
2531 	if (unlikely(cmd == NULL)) {
2532 		DRM_ERROR("Failed reserving fifo space for commands.\n");
2533 		ret = -ENOMEM;
2534 		goto out_unlock_binding;
2535 	}
2536 
2537 	vmw_apply_relocations(sw_context);
2538 	memcpy(cmd, kernel_commands, command_size);
2539 
2540 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2541 	vmw_resource_relocations_free(&sw_context->res_relocations);
2542 
2543 	vmw_fifo_commit(dev_priv, command_size);
2544 
2545 	vmw_query_bo_switch_commit(dev_priv, sw_context);
2546 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2547 					 &fence,
2548 					 (user_fence_rep) ? &handle : NULL);
2549 	/*
2550 	 * This error is harmless, because if fence submission fails,
2551 	 * vmw_fifo_send_fence will sync. The error will be propagated to
2552 	 * user-space in @user_fence_rep.
2553 	 */
2554 
2555 	if (ret != 0)
2556 		DRM_ERROR("Fence submission error. Syncing.\n");
2557 
2558 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
2559 	mutex_unlock(&dev_priv->binding_mutex);
2560 
2561 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2562 				    (void *) fence);
2563 
2564 	if (unlikely(dev_priv->pinned_bo != NULL &&
2565 		     !dev_priv->query_cid_valid))
2566 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
2567 
2568 	vmw_clear_validations(sw_context);
2569 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2570 				    user_fence_rep, fence, handle);
2571 
2572 	/* Don't unreference when handing fence out */
2573 	if (unlikely(out_fence != NULL)) {
2574 		*out_fence = fence;
2575 		fence = NULL;
2576 	} else if (likely(fence != NULL)) {
2577 		vmw_fence_obj_unreference(&fence);
2578 	}
2579 
2580 	list_splice_init(&sw_context->resource_list, &resource_list);
2581 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2582 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2583 
2584 	/*
2585 	 * Unreference resources outside of the cmdbuf_mutex to
2586 	 * avoid deadlocks in resource destruction paths.
2587 	 */
2588 	vmw_resource_list_unreference(&resource_list);
2589 
2590 	return 0;
2591 
2592 out_unlock_binding:
2593 	mutex_unlock(&dev_priv->binding_mutex);
2594 out_err:
2595 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2596 out_err_nores:
2597 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
2598 	vmw_resource_relocations_free(&sw_context->res_relocations);
2599 	vmw_free_relocations(sw_context);
2600 	vmw_clear_validations(sw_context);
2601 	if (unlikely(dev_priv->pinned_bo != NULL &&
2602 		     !dev_priv->query_cid_valid))
2603 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2604 out_unlock:
2605 	list_splice_init(&sw_context->resource_list, &resource_list);
2606 	error_resource = sw_context->error_resource;
2607 	sw_context->error_resource = NULL;
2608 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2609 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2610 
2611 	/*
2612 	 * Unreference resources outside of the cmdbuf_mutex to
2613 	 * avoid deadlocks in resource destruction paths.
2614 	 */
2615 	vmw_resource_list_unreference(&resource_list);
2616 	if (unlikely(error_resource != NULL))
2617 		vmw_resource_unreference(&error_resource);
2618 
2619 	return ret;
2620 }
2621 
2622 /**
2623  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2624  *
2625  * @dev_priv: The device private structure.
2626  *
2627  * This function is called to idle the fifo and unpin the query buffer
2628  * if the normal way to do this hits an error, which should typically be
2629  * extremely rare.
2630  */
2631 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2632 {
2633 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2634 
2635 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2636 	vmw_bo_pin(dev_priv->pinned_bo, false);
2637 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2638 	dev_priv->dummy_query_bo_pinned = false;
2639 }
2640 
2641 
2642 /**
2643  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2644  * query bo.
2645  *
2646  * @dev_priv: The device private structure.
2647  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2648  * _after_ a query barrier that flushes all queries touching the current
2649  * buffer pointed to by @dev_priv->pinned_bo.
2650  *
2651  * This function should be used to unpin the pinned query bo, or
2652  * as a query barrier when we need to make sure that all queries have
2653  * finished before the next fifo command. (For example on hardware
2654  * context destructions where the hardware may otherwise leak unfinished
2655  * queries).
2656  *
2657  * This function does not return any failure codes, but makes an attempt
2658  * to do safe unpinning in case of errors.
2659  *
2660  * The function will synchronize on the previous query barrier, and will
2661  * thus not finish until that barrier has executed.
2662  *
2663  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2664  * before calling this function.
2665  */
2666 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2667 				     struct vmw_fence_obj *fence)
2668 {
2669 	int ret = 0;
2670 	struct list_head validate_list;
2671 	struct ttm_validate_buffer pinned_val, query_val;
2672 	struct vmw_fence_obj *lfence = NULL;
2673 	struct ww_acquire_ctx ticket;
2674 
2675 	if (dev_priv->pinned_bo == NULL)
2676 		goto out_unlock;
2677 
2678 	INIT_LIST_HEAD(&validate_list);
2679 
2680 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2681 	list_add_tail(&pinned_val.head, &validate_list);
2682 
2683 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2684 	list_add_tail(&query_val.head, &validate_list);
2685 
2686 	do {
2687 		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2688 	} while (ret == -ERESTARTSYS);
2689 
2690 	if (unlikely(ret != 0)) {
2691 		vmw_execbuf_unpin_panic(dev_priv);
2692 		goto out_no_reserve;
2693 	}
2694 
2695 	if (dev_priv->query_cid_valid) {
2696 		BUG_ON(fence != NULL);
2697 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2698 		if (unlikely(ret != 0)) {
2699 			vmw_execbuf_unpin_panic(dev_priv);
2700 			goto out_no_emit;
2701 		}
2702 		dev_priv->query_cid_valid = false;
2703 	}
2704 
2705 	vmw_bo_pin(dev_priv->pinned_bo, false);
2706 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2707 	dev_priv->dummy_query_bo_pinned = false;
2708 
2709 	if (fence == NULL) {
2710 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2711 						  NULL);
2712 		fence = lfence;
2713 	}
2714 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2715 	if (lfence != NULL)
2716 		vmw_fence_obj_unreference(&lfence);
2717 
2718 	ttm_bo_unref(&query_val.bo);
2719 	ttm_bo_unref(&pinned_val.bo);
2720 	ttm_bo_unref(&dev_priv->pinned_bo);
2721 
2722 out_unlock:
2723 	return;
2724 
2725 out_no_emit:
2726 	ttm_eu_backoff_reservation(&ticket, &validate_list);
2727 out_no_reserve:
2728 	ttm_bo_unref(&query_val.bo);
2729 	ttm_bo_unref(&pinned_val.bo);
2730 	ttm_bo_unref(&dev_priv->pinned_bo);
2731 }
2732 
2733 /**
2734  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2735  * query bo.
2736  *
2737  * @dev_priv: The device private structure.
2738  *
2739  * This function should be used to unpin the pinned query bo, or
2740  * as a query barrier when we need to make sure that all queries have
2741  * finished before the next fifo command. (For example on hardware
2742  * context destructions where the hardware may otherwise leak unfinished
2743  * queries).
2744  *
2745  * This function does not return any failure codes, but makes an attempt
2746  * to do safe unpinning in case of errors.
2747  *
2748  * The function will synchronize on the previous query barrier, and will
2749  * thus not finish until that barrier has executed.
2750  */
2751 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2752 {
2753 	mutex_lock(&dev_priv->cmdbuf_mutex);
2754 	if (dev_priv->query_cid_valid)
2755 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2756 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2757 }
2758 
2759 
2760 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2761 		      struct drm_file *file_priv)
2762 {
2763 	struct vmw_private *dev_priv = vmw_priv(dev);
2764 	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2765 	int ret;
2766 
2767 	/*
2768 	 * This will allow us to extend the ioctl argument while
2769 	 * maintaining backwards compatibility:
2770 	 * We take different code paths depending on the value of
2771 	 * arg->version.
2772 	 */
2773 
2774 	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2775 		DRM_ERROR("Incorrect execbuf version.\n");
2776 		DRM_ERROR("You're running outdated experimental "
2777 			  "vmwgfx user-space drivers.\n");
2778 		return -EINVAL;
2779 	}
2780 
2781 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2782 	if (unlikely(ret != 0))
2783 		return ret;
2784 
2785 	ret = vmw_execbuf_process(file_priv, dev_priv,
2786 				  (void __user *)(unsigned long)arg->commands,
2787 				  NULL, arg->command_size, arg->throttle_us,
2788 				  (void __user *)(unsigned long)arg->fence_rep,
2789 				  NULL);
2790 
2791 	if (unlikely(ret != 0))
2792 		goto out_unlock;
2793 
2794 	vmw_kms_cursor_post_execbuf(dev_priv);
2795 
2796 out_unlock:
2797 	ttm_read_unlock(&dev_priv->reservation_sem);
2798 	return ret;
2799 }
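
/*
 * Editor's note: hypothetical user-space sketch, not part of this file. The
 * version check above means callers must fill drm_vmw_execbuf_arg::version
 * with DRM_VMW_EXECBUF_VERSION before issuing the ioctl:
 *
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_size;
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */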
2800