1dff96888SDirk Hohndel (VMware) // SPDX-License-Identifier: GPL-2.0 OR MIT
2d80efd5cSThomas Hellstrom /**************************************************************************
3d80efd5cSThomas Hellstrom  *
409881d29SZack Rusin  * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA
5d80efd5cSThomas Hellstrom  *
6d80efd5cSThomas Hellstrom  * Permission is hereby granted, free of charge, to any person obtaining a
7d80efd5cSThomas Hellstrom  * copy of this software and associated documentation files (the
8d80efd5cSThomas Hellstrom  * "Software"), to deal in the Software without restriction, including
9d80efd5cSThomas Hellstrom  * without limitation the rights to use, copy, modify, merge, publish,
10d80efd5cSThomas Hellstrom  * distribute, sub license, and/or sell copies of the Software, and to
11d80efd5cSThomas Hellstrom  * permit persons to whom the Software is furnished to do so, subject to
12d80efd5cSThomas Hellstrom  * the following conditions:
13d80efd5cSThomas Hellstrom  *
14d80efd5cSThomas Hellstrom  * The above copyright notice and this permission notice (including the
15d80efd5cSThomas Hellstrom  * next paragraph) shall be included in all copies or substantial portions
16d80efd5cSThomas Hellstrom  * of the Software.
17d80efd5cSThomas Hellstrom  *
18d80efd5cSThomas Hellstrom  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19d80efd5cSThomas Hellstrom  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20d80efd5cSThomas Hellstrom  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21d80efd5cSThomas Hellstrom  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22d80efd5cSThomas Hellstrom  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23d80efd5cSThomas Hellstrom  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24d80efd5cSThomas Hellstrom  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25d80efd5cSThomas Hellstrom  *
26d80efd5cSThomas Hellstrom  **************************************************************************/
27d80efd5cSThomas Hellstrom /*
28d80efd5cSThomas Hellstrom  * Treat context OTables as resources to make use of the resource
29d80efd5cSThomas Hellstrom  * backing MOB eviction mechanism, that is used to read back the COTable
30d80efd5cSThomas Hellstrom  * whenever the backing MOB is evicted.
31d80efd5cSThomas Hellstrom  */
32d80efd5cSThomas Hellstrom 
3309881d29SZack Rusin #include "vmwgfx_bo.h"
34d80efd5cSThomas Hellstrom #include "vmwgfx_drv.h"
354bb50606SZack Rusin #include "vmwgfx_mksstat.h"
36d80efd5cSThomas Hellstrom #include "vmwgfx_resource_priv.h"
37d80efd5cSThomas Hellstrom #include "vmwgfx_so.h"
38d80efd5cSThomas Hellstrom 
3909881d29SZack Rusin #include <drm/ttm/ttm_placement.h>
4009881d29SZack Rusin 
41d80efd5cSThomas Hellstrom /**
42d80efd5cSThomas Hellstrom  * struct vmw_cotable - Context Object Table resource
43d80efd5cSThomas Hellstrom  *
44d80efd5cSThomas Hellstrom  * @res: struct vmw_resource we are deriving from.
45d80efd5cSThomas Hellstrom  * @ctx: non-refcounted pointer to the owning context.
46d80efd5cSThomas Hellstrom  * @size_read_back: Size of data read back during eviction.
47d80efd5cSThomas Hellstrom  * @seen_entries: Seen entries in command stream for this cotable.
48d80efd5cSThomas Hellstrom  * @type: The cotable type.
49d80efd5cSThomas Hellstrom  * @scrubbed: Whether the cotable has been scrubbed.
50d80efd5cSThomas Hellstrom  * @resource_list: List of resources in the cotable.
51d80efd5cSThomas Hellstrom  */
struct vmw_cotable {
	struct vmw_resource res;	/* Embedded base resource. */
	struct vmw_resource *ctx;	/* Owning context; not refcounted. */
	size_t size_read_back;		/* Valid bytes read back at eviction. */
	int seen_entries;		/* Highest entry index seen in the command stream. */
	u32 type;			/* Cotable type (SVGACOTableType). */
	bool scrubbed;			/* True when unbound from the device. */
	struct list_head resource_list;	/* Resources tracked by this cotable. */
};
61d80efd5cSThomas Hellstrom 
62d80efd5cSThomas Hellstrom /**
63d80efd5cSThomas Hellstrom  * struct vmw_cotable_info - Static info about cotable types
64d80efd5cSThomas Hellstrom  *
 * @min_initial_entries: Min number of initial entries at cotable allocation
66d80efd5cSThomas Hellstrom  * for this cotable type.
67d80efd5cSThomas Hellstrom  * @size: Size of each entry.
683894709eSLee Jones  * @unbind_func: Unbind call-back function.
69d80efd5cSThomas Hellstrom  */
struct vmw_cotable_info {
	u32 min_initial_entries;	/* Minimum number of entries at first allocation. */
	u32 size;			/* Size in bytes of a single entry. */
	/* Optional callback to scrub/destroy resources tracked by the cotable. */
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};
76d80efd5cSThomas Hellstrom 
77148e5f55SZack Rusin 
78148e5f55SZack Rusin /*
79148e5f55SZack Rusin  * Getting the initial size right is difficult because it all depends
80148e5f55SZack Rusin  * on what the userspace is doing. The sizes will be aligned up to
81148e5f55SZack Rusin  * a PAGE_SIZE so we just want to make sure that for majority of apps
82148e5f55SZack Rusin  * the initial number of entries doesn't require an immediate resize.
83148e5f55SZack Rusin  * For all cotables except SVGACOTableDXElementLayoutEntry and
84148e5f55SZack Rusin  * SVGACOTableDXBlendStateEntry the initial number of entries fits
85148e5f55SZack Rusin  * within the PAGE_SIZE. For SVGACOTableDXElementLayoutEntry and
86148e5f55SZack Rusin  * SVGACOTableDXBlendStateEntry we want to reserve two pages,
87148e5f55SZack Rusin  * because that's what all apps will require initially.
88148e5f55SZack Rusin  */
/* Per-type cotable info, indexed by the cotable type (vcotbl->type). */
static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};
103d80efd5cSThomas Hellstrom 
104d80efd5cSThomas Hellstrom /*
105d80efd5cSThomas Hellstrom  * Cotables with bindings that we remove must be scrubbed first,
106d80efd5cSThomas Hellstrom  * otherwise, the device will swap in an invalid context when we remove
107d80efd5cSThomas Hellstrom  * bindings before scrubbing a cotable...
108d80efd5cSThomas Hellstrom  */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	/* View cotables first — their bindings must be removed before the rest. */
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
	SVGA_COTABLE_UAVIEW,
};
123d80efd5cSThomas Hellstrom 
124d80efd5cSThomas Hellstrom static int vmw_cotable_bind(struct vmw_resource *res,
125d80efd5cSThomas Hellstrom 			    struct ttm_validate_buffer *val_buf);
126d80efd5cSThomas Hellstrom static int vmw_cotable_unbind(struct vmw_resource *res,
127d80efd5cSThomas Hellstrom 			      bool readback,
128d80efd5cSThomas Hellstrom 			      struct ttm_validate_buffer *val_buf);
129d80efd5cSThomas Hellstrom static int vmw_cotable_create(struct vmw_resource *res);
130d80efd5cSThomas Hellstrom static int vmw_cotable_destroy(struct vmw_resource *res);
131d80efd5cSThomas Hellstrom 
/* Resource callbacks for cotables; backed by guest memory placed in MOBs. */
static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};
146d80efd5cSThomas Hellstrom 
147d80efd5cSThomas Hellstrom /**
148d80efd5cSThomas Hellstrom  * vmw_cotable - Convert a struct vmw_resource pointer to a struct
149d80efd5cSThomas Hellstrom  * vmw_cotable pointer
150d80efd5cSThomas Hellstrom  *
151d80efd5cSThomas Hellstrom  * @res: Pointer to the resource.
152d80efd5cSThomas Hellstrom  */
vmw_cotable(struct vmw_resource * res)153d80efd5cSThomas Hellstrom static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
154d80efd5cSThomas Hellstrom {
155d80efd5cSThomas Hellstrom 	return container_of(res, struct vmw_cotable, res);
156d80efd5cSThomas Hellstrom }
157d80efd5cSThomas Hellstrom 
158d80efd5cSThomas Hellstrom /**
159d80efd5cSThomas Hellstrom  * vmw_cotable_destroy - Cotable resource destroy callback
160d80efd5cSThomas Hellstrom  *
161d80efd5cSThomas Hellstrom  * @res: Pointer to the cotable resource.
162d80efd5cSThomas Hellstrom  *
163d80efd5cSThomas Hellstrom  * There is no device cotable destroy command, so this function only
164d80efd5cSThomas Hellstrom  * makes sure that the resource id is set to invalid.
165d80efd5cSThomas Hellstrom  */
vmw_cotable_destroy(struct vmw_resource * res)166d80efd5cSThomas Hellstrom static int vmw_cotable_destroy(struct vmw_resource *res)
167d80efd5cSThomas Hellstrom {
168d80efd5cSThomas Hellstrom 	res->id = -1;
169d80efd5cSThomas Hellstrom 	return 0;
170d80efd5cSThomas Hellstrom }
171d80efd5cSThomas Hellstrom 
172d80efd5cSThomas Hellstrom /**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
174d80efd5cSThomas Hellstrom  *
175d80efd5cSThomas Hellstrom  * @res: Pointer to the cotable resource
176d80efd5cSThomas Hellstrom  *
177d80efd5cSThomas Hellstrom  * This function issues commands to (re)bind the cotable to
178d80efd5cSThomas Hellstrom  * its backing mob, which needs to be validated and reserved at this point.
179d80efd5cSThomas Hellstrom  * This is identical to bind() except the function interface looks different.
180d80efd5cSThomas Hellstrom  */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	/* The backing buffer must be reserved and placed in a MOB. */
	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	/* The mob id is the backing buffer's start offset in the MOB domain. */
	cmd->body.mobid = bo->resource->start;
	/* Only the data read back during the last eviction is valid. */
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}
212d80efd5cSThomas Hellstrom 
213d80efd5cSThomas Hellstrom /**
 * vmw_cotable_bind - Cotable resource bind callback
215d80efd5cSThomas Hellstrom  *
216d80efd5cSThomas Hellstrom  * @res: Pointer to the cotable resource
217d80efd5cSThomas Hellstrom  * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
218d80efd5cSThomas Hellstrom  * for convenience / fencing.
219d80efd5cSThomas Hellstrom  *
220d80efd5cSThomas Hellstrom  * This function issues commands to (re)bind the cotable to
221d80efd5cSThomas Hellstrom  * its backing mob, which needs to be validated and reserved at this point.
222d80efd5cSThomas Hellstrom  */
vmw_cotable_bind(struct vmw_resource * res,struct ttm_validate_buffer * val_buf)223d80efd5cSThomas Hellstrom static int vmw_cotable_bind(struct vmw_resource *res,
224d80efd5cSThomas Hellstrom 			    struct ttm_validate_buffer *val_buf)
225d80efd5cSThomas Hellstrom {
226d80efd5cSThomas Hellstrom 	/*
227d80efd5cSThomas Hellstrom 	 * The create() callback may have changed @res->backup without
228d80efd5cSThomas Hellstrom 	 * the caller noticing, and with val_buf->bo still pointing to
229d80efd5cSThomas Hellstrom 	 * the old backup buffer. Although hackish, and not used currently,
230d80efd5cSThomas Hellstrom 	 * take the opportunity to correct the value here so that it's not
231d80efd5cSThomas Hellstrom 	 * misused in the future.
232d80efd5cSThomas Hellstrom 	 */
233668b2066SZack Rusin 	val_buf->bo = &res->guest_memory_bo->tbo;
234d80efd5cSThomas Hellstrom 
235d80efd5cSThomas Hellstrom 	return vmw_cotable_unscrub(res);
236d80efd5cSThomas Hellstrom }
237d80efd5cSThomas Hellstrom 
238d80efd5cSThomas Hellstrom /**
239d80efd5cSThomas Hellstrom  * vmw_cotable_scrub - Scrub the cotable from the device.
240d80efd5cSThomas Hellstrom  *
241d80efd5cSThomas Hellstrom  * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the backup
243d80efd5cSThomas Hellstrom  * buffer.
244d80efd5cSThomas Hellstrom  *
245d80efd5cSThomas Hellstrom  * In some situations (context swapouts) it might be desirable to make the
246d80efd5cSThomas Hellstrom  * device forget about the cotable without performing a full unbind. A full
247d80efd5cSThomas Hellstrom  * unbind requires reserved backup buffers and it might not be possible to
248d80efd5cSThomas Hellstrom  * reserve them due to locking order violation issues. The vmw_cotable_scrub
249d80efd5cSThomas Hellstrom  * function implements a partial unbind() without that requirement but with the
250d80efd5cSThomas Hellstrom  * following restrictions.
251d80efd5cSThomas Hellstrom  * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
252d80efd5cSThomas Hellstrom  *    be called.
253d80efd5cSThomas Hellstrom  * 2) Before the cotable backing buffer is used by the CPU, or during the
254d80efd5cSThomas Hellstrom  *    resource destruction, vmw_cotable_unbind() must be called.
255d80efd5cSThomas Hellstrom  */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	/* First scrub/destroy the resources tracked by this cotable. */
	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (!cmd1)
		return -ENOMEM;

	vcotbl->size_read_back = 0;
	if (readback) {
		/* The readback command immediately precedes the set command. */
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->guest_memory_size;
	}
	/* Detach the cotable from the device by setting an invalid mob id. */
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_cmd_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}
310d80efd5cSThomas Hellstrom 
311d80efd5cSThomas Hellstrom /**
312d80efd5cSThomas Hellstrom  * vmw_cotable_unbind - Cotable resource unbind callback
313d80efd5cSThomas Hellstrom  *
314d80efd5cSThomas Hellstrom  * @res: Pointer to the cotable resource.
315d80efd5cSThomas Hellstrom  * @readback: Whether to read back cotable data to the backup buffer.
3163894709eSLee Jones  * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
317d80efd5cSThomas Hellstrom  * for convenience / fencing.
318d80efd5cSThomas Hellstrom  *
319d80efd5cSThomas Hellstrom  * Unbinds the cotable from the device and fences the backup buffer.
320d80efd5cSThomas Hellstrom  */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	/* Nothing to unbind if no MOB is attached. */
	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	mutex_lock(&dev_priv->binding_mutex);
	/* Scrub via the context so all its cotables are handled in order. */
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	/* Fence the backing buffer to delay CPU access until idle. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
347d80efd5cSThomas Hellstrom 
348d80efd5cSThomas Hellstrom /**
349d80efd5cSThomas Hellstrom  * vmw_cotable_readback - Read back a cotable without unbinding.
350d80efd5cSThomas Hellstrom  *
351d80efd5cSThomas Hellstrom  * @res: The cotable resource.
352d80efd5cSThomas Hellstrom  *
353d80efd5cSThomas Hellstrom  * Reads back a cotable to its backing mob without scrubbing the MOB from
354d80efd5cSThomas Hellstrom  * the cotable. The MOB is fenced for subsequent CPU access.
355d80efd5cSThomas Hellstrom  */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	/* A scrubbed cotable has no device-side state to read back. */
	if (!vcotbl->scrubbed) {
		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->guest_memory_size;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	/* Fence the MOB for subsequent CPU access. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}
386d80efd5cSThomas Hellstrom 
387d80efd5cSThomas Hellstrom /**
388d80efd5cSThomas Hellstrom  * vmw_cotable_resize - Resize a cotable.
389d80efd5cSThomas Hellstrom  *
390d80efd5cSThomas Hellstrom  * @res: The cotable resource.
391d80efd5cSThomas Hellstrom  * @new_size: The new size.
392d80efd5cSThomas Hellstrom  *
393d80efd5cSThomas Hellstrom  * Resizes a cotable and binds the new backup buffer.
394d80efd5cSThomas Hellstrom  * On failure the cotable is left intact.
395d80efd5cSThomas Hellstrom  * Important! This function may not fail once the MOB switch has been
396d80efd5cSThomas Hellstrom  * committed to hardware. That would put the device context in an
397d80efd5cSThomas Hellstrom  * invalid state which we can't currently recover from.
398d80efd5cSThomas Hellstrom  */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
	struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
	size_t old_size = res->guest_memory_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_MOB,
		.busy_domain = VMW_BO_DOMAIN_MOB,
		.bo_type = ttm_bo_type_device,
		.size = new_size,
		.pin = true
	};

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);

	/* Read back the current contents before switching backing buffers. */
	ret = vmw_cotable_readback(res);
	if (ret)
		goto out_done;

	/*
	 * The readback updated size_read_back; stash the new value and
	 * restore the old one so the cotable stays intact if we fail below.
	 */
	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;

	/*
	 * While the device is processing, allocate and reserve a buffer
	 * object for the new COTable. Initially pin the buffer object to make
	 * sure we can use tryreserve without failure.
	 */
	ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		goto out_done;
	}

	bo = &buf->tbo;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

	/* Wait for the readback into the old buffer to complete. */
	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_MOB,
			     VMW_BO_DOMAIN_MOB);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->guest_memory_bo = buf;
	res->guest_memory_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		/* Roll back to the old buffer; the cotable is still intact. */
		res->guest_memory_bo = old_buf;
		res->guest_memory_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
	vmw_user_bo_unref(&old_buf);
	res->id = vcotbl->type;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		goto out_wait;

	/* Release the pin acquired in vmw_bo_create */
	ttm_bo_unpin(bo);

	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
	vmw_user_bo_unref(&buf);

out_done:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return ret;
}
531d80efd5cSThomas Hellstrom 
532d80efd5cSThomas Hellstrom /**
533d80efd5cSThomas Hellstrom  * vmw_cotable_create - Cotable resource create callback
534d80efd5cSThomas Hellstrom  *
535d80efd5cSThomas Hellstrom  * @res: Pointer to a cotable resource.
536d80efd5cSThomas Hellstrom  *
537d80efd5cSThomas Hellstrom  * There is no separate create command for cotables, so this callback, which
538d80efd5cSThomas Hellstrom  * is called before bind() in the validation sequence is instead used for two
539d80efd5cSThomas Hellstrom  * things.
540d80efd5cSThomas Hellstrom  * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
541a0a63940SThomas Hellstrom  *    buffer.
542d80efd5cSThomas Hellstrom  * 2) Resize the cotable if needed.
543d80efd5cSThomas Hellstrom  */
vmw_cotable_create(struct vmw_resource * res)544d80efd5cSThomas Hellstrom static int vmw_cotable_create(struct vmw_resource *res)
545d80efd5cSThomas Hellstrom {
546d80efd5cSThomas Hellstrom 	struct vmw_cotable *vcotbl = vmw_cotable(res);
547668b2066SZack Rusin 	size_t new_size = res->guest_memory_size;
548d80efd5cSThomas Hellstrom 	size_t needed_size;
549d80efd5cSThomas Hellstrom 	int ret;
550d80efd5cSThomas Hellstrom 
551d80efd5cSThomas Hellstrom 	/* Check whether we need to resize the cotable */
552d80efd5cSThomas Hellstrom 	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
553d80efd5cSThomas Hellstrom 	while (needed_size > new_size)
554d80efd5cSThomas Hellstrom 		new_size *= 2;
555d80efd5cSThomas Hellstrom 
556668b2066SZack Rusin 	if (likely(new_size <= res->guest_memory_size)) {
557a0a63940SThomas Hellstrom 		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
558d80efd5cSThomas Hellstrom 			ret = vmw_cotable_unscrub(res);
559d80efd5cSThomas Hellstrom 			if (ret)
560d80efd5cSThomas Hellstrom 				return ret;
561d80efd5cSThomas Hellstrom 		}
562d80efd5cSThomas Hellstrom 		res->id = vcotbl->type;
563d80efd5cSThomas Hellstrom 		return 0;
564d80efd5cSThomas Hellstrom 	}
565d80efd5cSThomas Hellstrom 
566d80efd5cSThomas Hellstrom 	return vmw_cotable_resize(res, new_size);
567d80efd5cSThomas Hellstrom }
568d80efd5cSThomas Hellstrom 
/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 * There is nothing useful to do on failure at this stage, so the
 * return value of vmw_cotable_destroy() is deliberately discarded.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}
580d80efd5cSThomas Hellstrom 
/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 *
 * Releases the struct vmw_cotable allocated in vmw_cotable_alloc().
 * NOTE(review): freeing @res directly assumes the embedded res member
 * sits at the start of struct vmw_cotable -- confirm against the
 * struct definition.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	kfree(res);
}
590d80efd5cSThomas Hellstrom 
591d80efd5cSThomas Hellstrom /**
592d80efd5cSThomas Hellstrom  * vmw_cotable_alloc - Create a cotable resource
593d80efd5cSThomas Hellstrom  *
594d80efd5cSThomas Hellstrom  * @dev_priv: Pointer to a device private struct.
595d80efd5cSThomas Hellstrom  * @ctx: Pointer to the context resource.
596d80efd5cSThomas Hellstrom  * The cotable resource will not add a refcount.
597d80efd5cSThomas Hellstrom  * @type: The cotable type.
598d80efd5cSThomas Hellstrom  */
vmw_cotable_alloc(struct vmw_private * dev_priv,struct vmw_resource * ctx,u32 type)599d80efd5cSThomas Hellstrom struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
600d80efd5cSThomas Hellstrom 				       struct vmw_resource *ctx,
601d80efd5cSThomas Hellstrom 				       u32 type)
602d80efd5cSThomas Hellstrom {
603d80efd5cSThomas Hellstrom 	struct vmw_cotable *vcotbl;
604d80efd5cSThomas Hellstrom 	int ret;
605d80efd5cSThomas Hellstrom 	u32 num_entries;
606d80efd5cSThomas Hellstrom 
607d80efd5cSThomas Hellstrom 	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
6081a4adb05SRavikant B Sharma 	if (unlikely(!vcotbl)) {
609d80efd5cSThomas Hellstrom 		ret = -ENOMEM;
610d80efd5cSThomas Hellstrom 		goto out_no_alloc;
611d80efd5cSThomas Hellstrom 	}
612d80efd5cSThomas Hellstrom 
613d80efd5cSThomas Hellstrom 	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
614d80efd5cSThomas Hellstrom 				vmw_cotable_free, &vmw_cotable_func);
615d80efd5cSThomas Hellstrom 	if (unlikely(ret != 0))
616d80efd5cSThomas Hellstrom 		goto out_no_init;
617d80efd5cSThomas Hellstrom 
618d80efd5cSThomas Hellstrom 	INIT_LIST_HEAD(&vcotbl->resource_list);
619d80efd5cSThomas Hellstrom 	vcotbl->res.id = type;
620668b2066SZack Rusin 	vcotbl->res.guest_memory_size = PAGE_SIZE;
621d80efd5cSThomas Hellstrom 	num_entries = PAGE_SIZE / co_info[type].size;
622d80efd5cSThomas Hellstrom 	if (num_entries < co_info[type].min_initial_entries) {
623668b2066SZack Rusin 		vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
624d80efd5cSThomas Hellstrom 			co_info[type].size;
625668b2066SZack Rusin 		vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
626d80efd5cSThomas Hellstrom 	}
627d80efd5cSThomas Hellstrom 
628d80efd5cSThomas Hellstrom 	vcotbl->scrubbed = true;
629d80efd5cSThomas Hellstrom 	vcotbl->seen_entries = -1;
630d80efd5cSThomas Hellstrom 	vcotbl->type = type;
631d80efd5cSThomas Hellstrom 	vcotbl->ctx = ctx;
632d80efd5cSThomas Hellstrom 
63313289241SThomas Hellstrom 	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
634d80efd5cSThomas Hellstrom 
635d80efd5cSThomas Hellstrom 	return &vcotbl->res;
636d80efd5cSThomas Hellstrom 
637d80efd5cSThomas Hellstrom out_no_init:
638d80efd5cSThomas Hellstrom 	kfree(vcotbl);
639d80efd5cSThomas Hellstrom out_no_alloc:
640d80efd5cSThomas Hellstrom 	return ERR_PTR(ret);
641d80efd5cSThomas Hellstrom }
642d80efd5cSThomas Hellstrom 
643d80efd5cSThomas Hellstrom /**
644d80efd5cSThomas Hellstrom  * vmw_cotable_notify - Notify the cotable about an item creation
645d80efd5cSThomas Hellstrom  *
646d80efd5cSThomas Hellstrom  * @res: Pointer to a cotable resource.
647d80efd5cSThomas Hellstrom  * @id: Item id.
648d80efd5cSThomas Hellstrom  */
vmw_cotable_notify(struct vmw_resource * res,int id)649d80efd5cSThomas Hellstrom int vmw_cotable_notify(struct vmw_resource *res, int id)
650d80efd5cSThomas Hellstrom {
651d80efd5cSThomas Hellstrom 	struct vmw_cotable *vcotbl = vmw_cotable(res);
652d80efd5cSThomas Hellstrom 
653d80efd5cSThomas Hellstrom 	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
654d80efd5cSThomas Hellstrom 		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
655d80efd5cSThomas Hellstrom 			  (unsigned) vcotbl->type, id);
656d80efd5cSThomas Hellstrom 		return -EINVAL;
657d80efd5cSThomas Hellstrom 	}
658d80efd5cSThomas Hellstrom 
659d80efd5cSThomas Hellstrom 	if (vcotbl->seen_entries < id) {
660d80efd5cSThomas Hellstrom 		/* Trigger a call to create() on next validate */
661d80efd5cSThomas Hellstrom 		res->id = -1;
662d80efd5cSThomas Hellstrom 		vcotbl->seen_entries = id;
663d80efd5cSThomas Hellstrom 	}
664d80efd5cSThomas Hellstrom 
665d80efd5cSThomas Hellstrom 	return 0;
666d80efd5cSThomas Hellstrom }
667d80efd5cSThomas Hellstrom 
668d80efd5cSThomas Hellstrom /**
6692cd80dbdSZack Rusin  * vmw_cotable_add_resource - add a view to the cotable's list of active views.
670d80efd5cSThomas Hellstrom  *
671d80efd5cSThomas Hellstrom  * @res: pointer struct vmw_resource representing the cotable.
672d80efd5cSThomas Hellstrom  * @head: pointer to the struct list_head member of the resource, dedicated
673d80efd5cSThomas Hellstrom  * to the cotable active resource list.
674d80efd5cSThomas Hellstrom  */
vmw_cotable_add_resource(struct vmw_resource * res,struct list_head * head)675d80efd5cSThomas Hellstrom void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
676d80efd5cSThomas Hellstrom {
677d80efd5cSThomas Hellstrom 	struct vmw_cotable *vcotbl =
678d80efd5cSThomas Hellstrom 		container_of(res, struct vmw_cotable, res);
679d80efd5cSThomas Hellstrom 
680d80efd5cSThomas Hellstrom 	list_add_tail(head, &vcotbl->resource_list);
681d80efd5cSThomas Hellstrom }
682