/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

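/**
 * struct vmw_user_context - User-space visible context resource
 *
 * @base: The TTM base object backing the user-space handle.
 * @res: The context resource.
 * @cbs: Tracker for the bindings currently set up on this context.
 */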
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
};

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

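/*
 * Scrub function for each binding type. A scrub function either unbinds
 * (rebind == false) or rebinds (rebind == true) the resource referenced
 * by a tracked binding on the device.
 */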
static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture,
};

/*
 * Context management:
 */

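/**
 * vmw_hw_context_destroy - Destroy a hw context on the device
 *
 * @res: The context resource.
 *
 * For guest-backed contexts, scrub all bindings and destroy the context
 * on the device. For legacy contexts, emit an SVGA_3D_CMD_CONTEXT_DESTROY
 * command to the FIFO.
 */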
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill
			(&container_of(res, struct vmw_user_context, res)->cbs);
		(void) vmw_gb_context_destroy(res);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->binding_mutex);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

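/**
 * vmw_gb_context_init - Initialize a guest-backed context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call when freeing the resource, or NULL to
 * use kfree().
 *
 * Sets up the resource with the guest-backed context resource functions
 * and an empty binding state tracker.
 */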
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

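/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call when freeing the resource, or NULL to
 * use kfree().
 *
 * On guest-backed devices this dispatches to vmw_gb_context_init().
 * Otherwise a hw context id is allocated and an
 * SVGA_3D_CMD_CONTEXT_DEFINE command is emitted to the FIFO.
 */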
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

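/**
 * vmw_context_alloc - Allocate and initialize a bare context resource
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Allocates a context resource without a user-space base object.
 * Returns the new resource, or NULL on failure.
 */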
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

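/**
 * vmw_gb_context_create - Create a guest-backed context on the device
 *
 * @res: The context resource.
 *
 * Allocates a context id and emits an SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. Does nothing if the context has already been created.
 */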
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

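/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB
 *
 * @res: The context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_CONTEXT command pointing the context at
 * the backup buffer's MOB id.
 */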
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

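/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB
 *
 * @res: The context resource.
 * @readback: Whether to read the context state back into the backup
 * buffer before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings of the context, optionally issues a readback
 * command, unbinds the MOB and fences the backup buffer.
 */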
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

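/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device
 *
 * @res: The context resource.
 *
 * Emits an SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the
 * context id. Does nothing if the context was never created.
 */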
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/*
 * User-space context management:
 */

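/**
 * vmw_user_context_base_to_res - Return the resource embedded in a user
 * context TTM base object
 *
 * @base: Pointer to the TTM base object.
 */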
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

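/**
 * vmw_user_context_free - Free a user context
 *
 * @res: The context resource embedded in a struct vmw_user_context.
 *
 * Frees the embedding struct vmw_user_context and releases its size from
 * the global TTM memory accounting.
 */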
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release a user context base object
 *
 * @p_base: Pointer to a pointer to the base object. Cleared on return.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

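/**
 * vmw_context_destroy_ioctl - Ioctl destroying a user-space context handle
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_context_arg identifying the context.
 * @file_priv: The calling file private.
 *
 * Drops the calling file's reference on the base object identified by
 * the supplied context id.
 */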
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

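/**
 * vmw_context_define_ioctl - Ioctl creating a new context
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_context_arg. On success the cid
 * member is filled in with the handle of the new context.
 * @file_priv: The calling file private.
 *
 * Allocates a struct vmw_user_context, initializes the embedded context
 * resource and sets up a TTM base object for the user-space handle.
 */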
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind command instead of a scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind command instead of a scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind command instead of a scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them into a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop - Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add - Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}

/**
 * vmw_context_binding_transfer - Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Starts tracking the binding described by @bi in the persistent state
 * tracker @cbs, replacing any binding previously tracked for the same
 * binding point.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	}
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding_state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once the commands that set
 * up the bindings have been submitted to the device.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources. Returns 0 on success, or the error code returned by the
 * failing rebind function.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
			    SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}