// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Highest entry id seen in the command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Minimum number of initial entries at cotable
 * allocation for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Unbind call-back function.
 */
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};


/*
 * Getting the initial size right is difficult because it all depends
 * on what userspace is doing. The sizes will be aligned up to
 * PAGE_SIZE, so we just want to make sure that for the majority of apps
 * the initial number of entries doesn't require an immediate resize.
 * For all cotables except SVGACOTableDXElementLayoutEntry and
 * SVGACOTableDXBlendStateEntry the initial number of entries fits
 * within a single page. For SVGACOTableDXElementLayoutEntry and
 * SVGACOTableDXBlendStateEntry we want to reserve two pages,
 * because that's what all apps will require initially.
 */
static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};

/*
 * Cotables with bindings that we remove must be scrubbed first;
 * otherwise the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable...
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
	SVGA_COTABLE_UAVIEW,
};

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->backup->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
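	/* For MOB placements, the buffer object's start page is the mob id. */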
	cmd->body.mobid = bo->resource->start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->backup without
	 * the caller noticing, and with val_buf->bo still pointing to
	 * the old backup buffer. Although hackish, and not used currently,
	 * take the opportunity to correct the value here so that it's not
	 * misused in the future.
	 */
	val_buf->bo = &res->backup->base;

	return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the backup
 * buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions:
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (!cmd1)
		return -ENOMEM;

	vcotbl->size_read_back = 0;
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->backup_size;
	}
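	/* Detach the backing MOB on the device by binding an invalid mob id. */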
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_cmd_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

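	/* Scrub all of the owning context's cotables rather than just this one. */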
	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	if (!vcotbl->scrubbed) {
		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->backup_size;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(&res->backup->base, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function must not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_buffer_object *buf, *old_buf = res->backup;
	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
	size_t old_size = res->backup_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);

	ret = vmw_cotable_readback(res);
	if (ret)
		goto out_done;

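	/*
	 * Stash the size reported by the readback and keep the old value
	 * in place until the new MOB is actually bound below.
	 */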
	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;

	/*
	 * While the device is processing, allocate and reserve a buffer object
	 * for the new COTable. Initially pin the buffer object to make sure
	 * we can use tryreserve without failure.
	 */
	ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
			    true, true, vmw_bo_bo_free, &buf);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		goto out_done;
	}

	bo = &buf->base;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page-by-page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->backup = buf;
	res->backup_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->backup = old_buf;
		res->backup_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
	vmw_bo_unreference(&old_buf);
	res->id = vcotbl->type;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		goto out_wait;

	/* Release the pin acquired in vmw_bo_create(). */
	ttm_bo_unpin(bo);

	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
	vmw_bo_unreference(&buf);

out_done:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for two
 * things:
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->backup_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable */
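	/* seen_entries is the highest entry id seen, hence the + 1. */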
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
	while (needed_size > new_size)
		new_size *= 2;

	if (likely(new_size <= res->backup_size)) {
		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	kfree(res);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not take a refcount on the context.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	int ret;
	u32 num_entries;

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(!vcotbl)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
	vcotbl->res.backup_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.backup_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
	}

	vcotbl->scrubbed = true;
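	/* -1 means no cotable entries have been seen yet. */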
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}

	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}

/**
 * vmw_cotable_add_resource - add a resource to the cotable's list of
 * active resources.
 *
 * @res: pointer to struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}