/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

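/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: TTM base object backing the user-space handle.
 * @res: The context resource.
 * @cbs: Persistent binding state tracker for this context.
 */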
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
};



typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/**
 * Context management:
 */

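/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * Hardware destructor registered with vmw_resource_activate(). For
 * guest-backed contexts, all device bindings are killed and the context
 * is destroyed through vmw_gb_context_destroy(). For legacy contexts, a
 * context destroy command is emitted to the device FIFO.
 */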
static void vmw_hw_context_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill
			(&container_of(res, struct vmw_user_context, res)->cbs);
		(void) vmw_gb_context_destroy(res);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->binding_mutex);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

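/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * Sets up the resource with the guest-backed context resource functions,
 * sets the backup buffer size and initializes the binding state tracker.
 */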
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

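/**
 * vmw_context_init - Initialize a legacy or guest-backed context resource.
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * Dispatches to vmw_gb_context_init() when the device supports mobs.
 * Otherwise allocates a legacy context id and emits a context define
 * command to the device FIFO.
 */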
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

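/**
 * vmw_context_alloc - Allocate a kernel-owned context resource.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Returns a pointer to the newly created and initialized context
 * resource, or NULL on failure.
 */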
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}


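/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device context id and emits a define command to the
 * device FIFO. Does nothing if the context already has an id.
 */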
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

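/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup mob.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer information for the backup buffer.
 *
 * Emits a bind command attaching the context to its backing mob,
 * telling the device whether the mob holds previously saved context data.
 */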
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

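/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup mob.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to emit a readback command before unbinding.
 * @val_buf: Validation buffer information for the backup buffer.
 *
 * Scrubs all context bindings, optionally reads the context state back
 * into the backup mob, unbinds the context from the mob and fences the
 * backup buffer.
 */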
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

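/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 *
 * @res: Pointer to the context resource.
 *
 * Emits a destroy command to the device FIFO and releases the context id.
 */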
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

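/**
 * vmw_user_context_base_to_res - Return the resource embedded in a user
 * context, given its TTM base object.
 *
 * @base: Pointer to the TTM base object.
 */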
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

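/**
 * vmw_user_context_free - Resource destructor for user contexts.
 *
 * @res: Pointer to the context resource.
 *
 * Frees the user context structure and returns its accounted size to
 * the global TTM memory accounting.
 */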
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release the base object's resource reference.
 *
 * @p_base: Double pointer to the TTM base object. Cleared on return.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

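/**
 * vmw_context_destroy_ioctl - Ioctl implementation destroying a user-space
 * context handle.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument (struct drm_vmw_context_arg).
 * @file_priv: Pointer to the calling file private.
 *
 * Drops the caller's reference on the context base object. The context
 * itself is destroyed once the last reference is gone.
 */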
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

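/**
 * vmw_context_define_ioctl - Ioctl implementation creating a new context.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument (struct drm_vmw_context_arg).
 * @file_priv: Pointer to the calling file private.
 *
 * Allocates and initializes a user context resource, registers it as a
 * base object with the caller's object file, and returns the context id
 * in the ioctl argument.
 */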
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;


	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;

}

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid =
		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid =
		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}


	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value =
		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Transfers the binding described by @bi to the persistent tracker @cbs,
 * replacing any previous binding at the same binding point.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	}
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding_state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrubs all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
			    SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}