/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

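/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: TTM base object, making the context visible to user space.
 * @res: The embedded struct vmw_resource.
 * @cbs: State tracker for the resources currently bound to this context.
 */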
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
};

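/*
 * A scrub function emits the device commands needed to detach one
 * binding of its type from the device; it does not touch the binding
 * tracker state itself.
 */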
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
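/* Accounted size of a struct vmw_user_context; computed on first use. */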
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/*
 * Context management:
 */

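/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to a struct vmw_resource embedded in the context.
 *
 * Destructor for the common resource destruction path. For guest-backed
 * contexts this takes the command buffer mutex and calls
 * vmw_gb_context_destroy(); for legacy contexts it emits a
 * SVGA_3D_CMD_CONTEXT_DESTROY command directly.
 */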
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		(void) vmw_gb_context_destroy(res);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

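/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on initialization failure, or NULL to
 * use kfree().
 *
 * On success the resource is activated with vmw_hw_context_destroy()
 * as its hardware destructor.
 */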
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

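/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on initialization failure, or NULL to
 * use kfree().
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices;
 * otherwise defines a legacy context on the device by emitting a
 * SVGA_3D_CMD_CONTEXT_DEFINE command.
 */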
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "definition.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

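/**
 * vmw_context_alloc - Allocate and initialize a kernel-owned context.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Returns a referenced context resource, or NULL on failure. On
 * failure, vmw_context_init() has already freed the resource.
 */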
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

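/**
 * vmw_gb_context_create - Allocate a device id and define the context
 * on the device.
 *
 * @res: The context resource.
 *
 * Called on first validation. If the context does not yet have a
 * device id, allocates one and emits a SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command.
 */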
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

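/**
 * vmw_gb_context_bind - Bind the context to its backup buffer.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer information for the backup MOB.
 *
 * Emits a SVGA_3D_CMD_BIND_GB_CONTEXT command. The validContents flag
 * tells the device whether the backup buffer holds previously saved
 * context state that should be restored.
 */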
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

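/**
 * vmw_gb_context_unbind - Unbind the context from its backup buffer.
 *
 * @res: The context resource.
 * @readback: Whether to read back context state into the backup buffer
 * before unbinding.
 * @val_buf: Validation buffer information for the backup MOB.
 *
 * Scrubs all context bindings, optionally issues a readback, binds the
 * context to the invalid MOB id, and finally fences the backup buffer
 * so it isn't reused before the device is done with it.
 */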
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_kill(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

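/**
 * vmw_gb_context_destroy - Destroy the context on the device and
 * release its device id.
 *
 * @res: The context resource.
 *
 * Must be called with all context bindings already scrubbed. Emits a
 * SVGA_3D_CMD_DESTROY_GB_CONTEXT command and invalidates the cached
 * query context id if it matches this context.
 */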
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	BUG_ON(!list_empty(&uctx->cbs.list));

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/*
 * User-space context management:
 */

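/**
 * vmw_user_context_base_to_res - Return the resource embedded in a
 * user context, given its TTM base object.
 *
 * @base: Pointer to the TTM base object.
 */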
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

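/**
 * vmw_user_context_free - Free the user context and release its
 * accounted graphics memory.
 *
 * @res: The resource embedded in the user context.
 */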
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release a base object reference.
 *
 * @p_base: Pointer to the base object pointer; cleared on return.
 *
 * Called when user space holds no more references on the base object.
 * Releases the base object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

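/**
 * vmw_context_destroy_ioctl - Ioctl entry point for destroying a
 * user-space context reference.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: Identifies the caller.
 */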
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

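/**
 * vmw_context_define_ioctl - Ioctl entry point for creating a context
 * and a user-space handle for it.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg in which the
 * new context id is returned.
 * @file_priv: Identifies the caller.
 */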
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = SVGA3D_INVALID_ID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = SVGA3D_INVALID_ID;
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop - Stop tracking a context binding.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add - Start tracking a context binding.
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}
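
/*
 * Example (illustrative only, not part of this file): command-stream
 * validation code typically stages a render-target binding along these
 * lines, where "staged" is a staged struct vmw_ctx_binding_state,
 * "ctx" the validated context resource and "surf" the validated
 * surface resource:
 *
 *	struct vmw_ctx_bindinfo bi;
 *
 *	bi.ctx = ctx;
 *	bi.res = surf;
 *	bi.bt = vmw_ctx_binding_rt;
 *	bi.i1.rt_type = SVGA3D_RT_COLOR0;
 *	ret = vmw_context_binding_add(staged, &bi);
 */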

/**
 * vmw_context_binding_transfer - Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Moves tracking of the binding @bi to the persistent tracker @cbs,
 * replacing any previous binding at the same binding point, and links
 * the entry into the bound resource's binding list, if any.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	list_add_tail(&loc->ctx_list, &cbs->list);
	if (bi->res != NULL)
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	else
		INIT_LIST_HEAD(&loc->res_list);
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list.
 *
 * @head: List head of the resource binding list.
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info.
 *
 * @ctx: Pointer to the context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}