1038ecc50SThomas Hellstrom // SPDX-License-Identifier: GPL-2.0 OR MIT
2038ecc50SThomas Hellstrom /**************************************************************************
3038ecc50SThomas Hellstrom *
409881d29SZack Rusin * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA
5038ecc50SThomas Hellstrom * All Rights Reserved.
6038ecc50SThomas Hellstrom *
7038ecc50SThomas Hellstrom * Permission is hereby granted, free of charge, to any person obtaining a
8038ecc50SThomas Hellstrom * copy of this software and associated documentation files (the
9038ecc50SThomas Hellstrom * "Software"), to deal in the Software without restriction, including
10038ecc50SThomas Hellstrom * without limitation the rights to use, copy, modify, merge, publish,
11038ecc50SThomas Hellstrom * distribute, sub license, and/or sell copies of the Software, and to
12038ecc50SThomas Hellstrom * permit persons to whom the Software is furnished to do so, subject to
13038ecc50SThomas Hellstrom * the following conditions:
14038ecc50SThomas Hellstrom *
15038ecc50SThomas Hellstrom * The above copyright notice and this permission notice (including the
16038ecc50SThomas Hellstrom * next paragraph) shall be included in all copies or substantial portions
17038ecc50SThomas Hellstrom * of the Software.
18038ecc50SThomas Hellstrom *
19038ecc50SThomas Hellstrom * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20038ecc50SThomas Hellstrom * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21038ecc50SThomas Hellstrom * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22038ecc50SThomas Hellstrom * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23038ecc50SThomas Hellstrom * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24038ecc50SThomas Hellstrom * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25038ecc50SThomas Hellstrom * USE OR OTHER DEALINGS IN THE SOFTWARE.
26038ecc50SThomas Hellstrom *
27038ecc50SThomas Hellstrom **************************************************************************/
2809881d29SZack Rusin #include "vmwgfx_bo.h"
29038ecc50SThomas Hellstrom #include "vmwgfx_drv.h"
3039985eeaSZack Rusin #include "vmwgfx_resource_priv.h"
3109881d29SZack Rusin #include "vmwgfx_validation.h"
3209881d29SZack Rusin
3309881d29SZack Rusin #include <linux/slab.h>
34038ecc50SThomas Hellstrom
358aadeb8aSZack Rusin
368aadeb8aSZack Rusin #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
378aadeb8aSZack Rusin
/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation- and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, number of new coherent
 * resources that will have this buffer as a backup buffer.
 *
 * These structures are allocated and freed in large numbers during a
 * validation batch, so space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct vmwgfx_hash_item hash;
	unsigned int coherent_count;
};
/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer
 * to be assigned to a resource.
 * @new_guest_memory_offset: Offset into the new backup mob for resources
 * that can share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_guest_memory_bo: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 * @private: Optionally additional memory for caller-private data. Must be
 * the last member, since it is a flexible array sized by the priv_size
 * argument of vmw_validation_add_resource().
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct vmwgfx_hash_item hash;
	struct vmw_resource *res;
	struct vmw_bo *new_guest_memory_bo;
	unsigned long new_guest_memory_offset;
	u32 no_buffer_needed : 1;
	u32 switching_guest_memory_bo : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};
89038ecc50SThomas Hellstrom
/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocated.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	/* Round up so consecutive suballocations stay sizeof(long)-aligned. */
	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	/* Not enough room left in the current page: grab a fresh one. */
	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			/* Account another granule against this context. */
			ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
			ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
		}

		/*
		 * NOTE(review): if alloc_page() fails below, vm_size_left and
		 * total_mem may already have been bumped above; presumably the
		 * slack is reset in vmw_validation_mem_free() - verify.
		 */
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		/* Pages are chained on page_list and freed wholesale later. */
		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	/* Bump-allocate from the tail of the current page. */
	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
139fc18afcfSThomas Hellstrom
140fc18afcfSThomas Hellstrom /**
141fc18afcfSThomas Hellstrom * vmw_validation_mem_free - Free all memory allocated using
142fc18afcfSThomas Hellstrom * vmw_validation_mem_alloc()
143fc18afcfSThomas Hellstrom * @ctx: The validation context
144fc18afcfSThomas Hellstrom *
145fc18afcfSThomas Hellstrom * All memory previously allocated for this context using
146fc18afcfSThomas Hellstrom * vmw_validation_mem_alloc() is freed.
147fc18afcfSThomas Hellstrom */
vmw_validation_mem_free(struct vmw_validation_context * ctx)148fc18afcfSThomas Hellstrom static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
149fc18afcfSThomas Hellstrom {
150fc18afcfSThomas Hellstrom struct page *entry, *next;
151fc18afcfSThomas Hellstrom
152fc18afcfSThomas Hellstrom list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
153fc18afcfSThomas Hellstrom list_del_init(&entry->lru);
154fc18afcfSThomas Hellstrom __free_page(entry);
155fc18afcfSThomas Hellstrom }
156fc18afcfSThomas Hellstrom
157fc18afcfSThomas Hellstrom ctx->mem_size_left = 0;
158fd567467SThomas Hellstrom if (ctx->vm && ctx->total_mem) {
159fd567467SThomas Hellstrom ctx->total_mem = 0;
160fd567467SThomas Hellstrom ctx->vm_size_left = 0;
161fd567467SThomas Hellstrom }
162fc18afcfSThomas Hellstrom }
163fc18afcfSThomas Hellstrom
/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_bo *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	/* Duplicate merging may be disabled per context. */
	if (!ctx->merge_dups)
		return NULL;

	if (ctx->sw_context) {
		/* Fast path: O(1) lookup in the sw_context hash table,
		 * keyed by the buffer object pointer value.
		 */
		struct vmwgfx_hash_item *hash;
		unsigned long key = (unsigned long) vbo;

		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
			if (hash->key == key) {
				bo_node = container_of(hash, typeof(*bo_node), hash);
				break;
			}
		}
	} else {
		/* Slow path: linear scan of the bo list. */
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->tbo) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}
205038ecc50SThomas Hellstrom
/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: Reference counted resource pointer.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	/* Duplicate merging may be disabled per context. */
	if (!ctx->merge_dups)
		return NULL;

	if (ctx->sw_context) {
		/* Fast path: O(1) lookup in the sw_context hash table,
		 * keyed by the resource pointer value.
		 */
		struct vmwgfx_hash_item *hash;
		unsigned long key = (unsigned long) res;

		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
			if (hash->key == key) {
				res_node = container_of(hash, typeof(*res_node), hash);
				break;
			}
		}
	} else {
		/* Slow path: resources may sit on either the context list
		 * (contexts, cotables) or the plain resource list, so scan
		 * both.
		 */
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}

	}
out:
	return res_node;
}
255038ecc50SThomas Hellstrom
/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_bo *vbo)
{
	struct vmw_validation_bo_node *bo_node;

	/* Each buffer object is registered at most once per context. */
	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (!bo_node) {
		struct ttm_validate_buffer *val_buf;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->sw_context) {
			/* Register for O(1) duplicate detection, keyed by
			 * the bo pointer value.
			 */
			bo_node->hash.key = (unsigned long) vbo;
			hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
				bo_node->hash.key);
		}
		val_buf = &bo_node->base;
		/* Fails if the bo is already on its way to destruction. */
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
		if (!val_buf->bo)
			/*
			 * NOTE(review): at this point the node may already be
			 * hash-added above but is never put on bo_list;
			 * presumably callers tear down the whole context on
			 * error - verify the stale hash entry cannot be hit.
			 */
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
	}

	return 0;
}
291038ecc50SThomas Hellstrom
/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;

	/* A resource already registered is only updated, not re-added. */
	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	/* Node and caller-private area are carved from the same allocation. */
	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->sw_context) {
		/* Register for O(1) duplicate detection, keyed by the
		 * resource pointer value.
		 */
		node->hash.key = (unsigned long) res;
		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
	}
	/* Fails if the resource is already on its way to destruction. */
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		/*
		 * NOTE(review): the node may already be hash-added above but
		 * is never put on a resource list; presumably callers tear
		 * down the whole context on error - verify.
		 */
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		/*
		 * Contexts and cotables go on the separate context list so
		 * they are reserved/validated before ordinary resources;
		 * (DX) contexts are prepended so they come first of all.
		 */
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
363038ecc50SThomas Hellstrom
364038ecc50SThomas Hellstrom /**
365a9f58c45SThomas Hellstrom * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
366a9f58c45SThomas Hellstrom * validation.
367a9f58c45SThomas Hellstrom * @ctx: The validation context.
368a9f58c45SThomas Hellstrom * @val_private: The additional meta-data pointer returned when the
369a9f58c45SThomas Hellstrom * resource was registered with the validation context. Used to identify
370a9f58c45SThomas Hellstrom * the resource.
371a9f58c45SThomas Hellstrom * @dirty: Dirty information VMW_RES_DIRTY_XX
372a9f58c45SThomas Hellstrom */
vmw_validation_res_set_dirty(struct vmw_validation_context * ctx,void * val_private,u32 dirty)373a9f58c45SThomas Hellstrom void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
374a9f58c45SThomas Hellstrom void *val_private, u32 dirty)
375a9f58c45SThomas Hellstrom {
376a9f58c45SThomas Hellstrom struct vmw_validation_res_node *val;
377a9f58c45SThomas Hellstrom
378a9f58c45SThomas Hellstrom if (!dirty)
379a9f58c45SThomas Hellstrom return;
380a9f58c45SThomas Hellstrom
381a9f58c45SThomas Hellstrom val = container_of(val_private, typeof(*val), private);
382a9f58c45SThomas Hellstrom val->dirty_set = 1;
383a9f58c45SThomas Hellstrom /* Overwriting previous information here is intentional! */
384a9f58c45SThomas Hellstrom val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
385a9f58c45SThomas Hellstrom }
386a9f58c45SThomas Hellstrom
387a9f58c45SThomas Hellstrom /**
388038ecc50SThomas Hellstrom * vmw_validation_res_switch_backup - Register a backup MOB switch during
389038ecc50SThomas Hellstrom * validation.
390038ecc50SThomas Hellstrom * @ctx: The validation context.
391038ecc50SThomas Hellstrom * @val_private: The additional meta-data pointer returned when the
392038ecc50SThomas Hellstrom * resource was registered with the validation context. Used to identify
393038ecc50SThomas Hellstrom * the resource.
394038ecc50SThomas Hellstrom * @vbo: The new backup buffer object MOB. This buffer object needs to have
395038ecc50SThomas Hellstrom * already been registered with the validation context.
396*668b2066SZack Rusin * @guest_memory_offset: Offset into the new backup MOB.
397038ecc50SThomas Hellstrom */
vmw_validation_res_switch_backup(struct vmw_validation_context * ctx,void * val_private,struct vmw_bo * vbo,unsigned long guest_memory_offset)398038ecc50SThomas Hellstrom void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
399038ecc50SThomas Hellstrom void *val_private,
40009881d29SZack Rusin struct vmw_bo *vbo,
401*668b2066SZack Rusin unsigned long guest_memory_offset)
402038ecc50SThomas Hellstrom {
403038ecc50SThomas Hellstrom struct vmw_validation_res_node *val;
404038ecc50SThomas Hellstrom
405038ecc50SThomas Hellstrom val = container_of(val_private, typeof(*val), private);
406038ecc50SThomas Hellstrom
407*668b2066SZack Rusin val->switching_guest_memory_bo = 1;
408038ecc50SThomas Hellstrom if (val->first_usage)
409038ecc50SThomas Hellstrom val->no_buffer_needed = 1;
410038ecc50SThomas Hellstrom
411*668b2066SZack Rusin val->new_guest_memory_bo = vbo;
412*668b2066SZack Rusin val->new_guest_memory_offset = guest_memory_offset;
413038ecc50SThomas Hellstrom }
414038ecc50SThomas Hellstrom
/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	/* Contexts/cotables were kept on a separate list so they end up
	 * first; merge into a single list for reservation order.
	 */
	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		/* Mark reserved so the unwind path knows what to undo. */
		val->reserved = 1;
		if (res->guest_memory_bo) {
			struct vmw_bo *vbo = res->guest_memory_bo;

			vmw_bo_placement_set(vbo,
					     res->func->domain,
					     res->func->busy_domain);
			ret = vmw_validation_add_bo(ctx, vbo);
			if (ret)
				goto out_unreserve;
		}

		/*
		 * A coherent resource switching to a new backup needs a
		 * dirty tracker on that bo; count the need here, allocate
		 * later in vmw_validation_bo_validate() where failure is
		 * still allowed.
		 */
		if (val->switching_guest_memory_bo && val->new_guest_memory_bo &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_guest_memory_bo);

			/* The new backup must already be on the bo list. */
			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	/* Back off everything reserved so far, without committing state. */
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}
471038ecc50SThomas Hellstrom
472038ecc50SThomas Hellstrom /**
473038ecc50SThomas Hellstrom * vmw_validation_res_unreserve - Unreserve all reserved resources
474038ecc50SThomas Hellstrom * registered with this validation context.
475038ecc50SThomas Hellstrom * @ctx: The validation context.
476038ecc50SThomas Hellstrom * @backoff: Whether this is a backoff- of a commit-type operation. This
477038ecc50SThomas Hellstrom * is used to determine whether to switch backup MOBs or not.
478038ecc50SThomas Hellstrom */
vmw_validation_res_unreserve(struct vmw_validation_context * ctx,bool backoff)479038ecc50SThomas Hellstrom void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
480038ecc50SThomas Hellstrom bool backoff)
481038ecc50SThomas Hellstrom {
482038ecc50SThomas Hellstrom struct vmw_validation_res_node *val;
483038ecc50SThomas Hellstrom
484038ecc50SThomas Hellstrom list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
485a9f58c45SThomas Hellstrom if (backoff)
486038ecc50SThomas Hellstrom list_for_each_entry(val, &ctx->resource_list, head) {
487038ecc50SThomas Hellstrom if (val->reserved)
488038ecc50SThomas Hellstrom vmw_resource_unreserve(val->res,
489a9f58c45SThomas Hellstrom false, false, false,
490a9f58c45SThomas Hellstrom NULL, 0);
491a9f58c45SThomas Hellstrom }
492a9f58c45SThomas Hellstrom else
493a9f58c45SThomas Hellstrom list_for_each_entry(val, &ctx->resource_list, head) {
494a9f58c45SThomas Hellstrom if (val->reserved)
495a9f58c45SThomas Hellstrom vmw_resource_unreserve(val->res,
496a9f58c45SThomas Hellstrom val->dirty_set,
497a9f58c45SThomas Hellstrom val->dirty,
498*668b2066SZack Rusin val->switching_guest_memory_bo,
499*668b2066SZack Rusin val->new_guest_memory_bo,
500*668b2066SZack Rusin val->new_guest_memory_offset);
501038ecc50SThomas Hellstrom }
502038ecc50SThomas Hellstrom }
503038ecc50SThomas Hellstrom
/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
					     bool interruptible)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	/* Refuse to move a bo that user-space currently has mapped. */
	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	/* A pinned bo is already placed; nothing to validate. */
	if (vbo->tbo.pin_count > 0)
		return 0;

	/* First attempt without evicting other buffers' resources. */
	ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try again, this time evicting
	 * previous contents.
	 */
	ctx.allow_res_evict = true;

	return ttm_bo_validate(bo, &vbo->placement, &ctx);
}
540038ecc50SThomas Hellstrom
/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);

		ret = vmw_validation_bo_validate_single(entry->base.bo, intr);

		if (ret)
			return ret;

		/*
		 * Rather than having the resource code allocating the bo
		 * dirty tracker in resource_unreserve() where we can't fail,
		 * Do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			/* One vmw_bo_dirty_add() per coherent resource that
			 * will use this bo as its new backup.
			 */
			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			/*
			 * NOTE(review): coherent_count is always 0 here after
			 * the loop completes, so this subtraction is a no-op
			 * and entry->coherent_count keeps its value -
			 * presumably intentional, verify against the
			 * unref/cleanup path.
			 */
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}
586038ecc50SThomas Hellstrom
587038ecc50SThomas Hellstrom /**
588038ecc50SThomas Hellstrom * vmw_validation_res_validate - Validate all resources registered with the
589038ecc50SThomas Hellstrom * validation context.
590038ecc50SThomas Hellstrom * @ctx: The validation context.
591038ecc50SThomas Hellstrom * @intr: Whether to perform waits interruptible if possible.
592038ecc50SThomas Hellstrom *
593038ecc50SThomas Hellstrom * Before this function is called, all resource backup buffers must have
594038ecc50SThomas Hellstrom * been validated.
595038ecc50SThomas Hellstrom *
596038ecc50SThomas Hellstrom * Return: Zero on success, -ERESTARTSYS if interrupted,
597038ecc50SThomas Hellstrom * negative error code on failure.
598038ecc50SThomas Hellstrom */
vmw_validation_res_validate(struct vmw_validation_context * ctx,bool intr)599038ecc50SThomas Hellstrom int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
600038ecc50SThomas Hellstrom {
601038ecc50SThomas Hellstrom struct vmw_validation_res_node *val;
602038ecc50SThomas Hellstrom int ret;
603038ecc50SThomas Hellstrom
604038ecc50SThomas Hellstrom list_for_each_entry(val, &ctx->resource_list, head) {
605038ecc50SThomas Hellstrom struct vmw_resource *res = val->res;
606*668b2066SZack Rusin struct vmw_bo *backup = res->guest_memory_bo;
607038ecc50SThomas Hellstrom
608fb80edb0SThomas Hellstrom ret = vmw_resource_validate(res, intr, val->dirty_set &&
609fb80edb0SThomas Hellstrom val->dirty);
610038ecc50SThomas Hellstrom if (ret) {
611038ecc50SThomas Hellstrom if (ret != -ERESTARTSYS)
612038ecc50SThomas Hellstrom DRM_ERROR("Failed to validate resource.\n");
613038ecc50SThomas Hellstrom return ret;
614038ecc50SThomas Hellstrom }
615038ecc50SThomas Hellstrom
616038ecc50SThomas Hellstrom /* Check if the resource switched backup buffer */
617*668b2066SZack Rusin if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) {
618*668b2066SZack Rusin struct vmw_bo *vbo = res->guest_memory_bo;
619038ecc50SThomas Hellstrom
62039985eeaSZack Rusin vmw_bo_placement_set(vbo, res->func->domain,
62139985eeaSZack Rusin res->func->busy_domain);
62239985eeaSZack Rusin ret = vmw_validation_add_bo(ctx, vbo);
623038ecc50SThomas Hellstrom if (ret)
624038ecc50SThomas Hellstrom return ret;
625038ecc50SThomas Hellstrom }
626038ecc50SThomas Hellstrom }
627038ecc50SThomas Hellstrom return 0;
628038ecc50SThomas Hellstrom }
629038ecc50SThomas Hellstrom
630038ecc50SThomas Hellstrom /**
631038ecc50SThomas Hellstrom * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
632038ecc50SThomas Hellstrom * and unregister it from this validation context.
633038ecc50SThomas Hellstrom * @ctx: The validation context.
634038ecc50SThomas Hellstrom *
635038ecc50SThomas Hellstrom * The hash table used for duplicate finding is an expensive resource and
636038ecc50SThomas Hellstrom * may be protected by mutexes that may cause deadlocks during resource
637038ecc50SThomas Hellstrom * unreferencing if held. After resource- and buffer object registering,
638038ecc50SThomas Hellstrom * there is no longer any use for this hash table, so allow freeing it
639038ecc50SThomas Hellstrom * either to shorten any mutex locking time, or before resources- and
640038ecc50SThomas Hellstrom * buffer objects are freed during validation context cleanup.
641038ecc50SThomas Hellstrom */
vmw_validation_drop_ht(struct vmw_validation_context * ctx)642038ecc50SThomas Hellstrom void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
643038ecc50SThomas Hellstrom {
644038ecc50SThomas Hellstrom struct vmw_validation_bo_node *entry;
645038ecc50SThomas Hellstrom struct vmw_validation_res_node *val;
646038ecc50SThomas Hellstrom
6479e931f2eSMaaz Mombasawala if (!ctx->sw_context)
648038ecc50SThomas Hellstrom return;
649038ecc50SThomas Hellstrom
650038ecc50SThomas Hellstrom list_for_each_entry(entry, &ctx->bo_list, base.head)
6519e931f2eSMaaz Mombasawala hash_del_rcu(&entry->hash.head);
652038ecc50SThomas Hellstrom
653038ecc50SThomas Hellstrom list_for_each_entry(val, &ctx->resource_list, head)
6549e931f2eSMaaz Mombasawala hash_del_rcu(&val->hash.head);
655038ecc50SThomas Hellstrom
656038ecc50SThomas Hellstrom list_for_each_entry(val, &ctx->resource_ctx_list, head)
6579e931f2eSMaaz Mombasawala hash_del_rcu(&entry->hash.head);
658038ecc50SThomas Hellstrom
6599e931f2eSMaaz Mombasawala ctx->sw_context = NULL;
660038ecc50SThomas Hellstrom }
661038ecc50SThomas Hellstrom
662038ecc50SThomas Hellstrom /**
663038ecc50SThomas Hellstrom * vmw_validation_unref_lists - Unregister previously registered buffer
664038ecc50SThomas Hellstrom * object and resources.
665038ecc50SThomas Hellstrom * @ctx: The validation context.
666038ecc50SThomas Hellstrom *
667038ecc50SThomas Hellstrom * Note that this function may cause buffer object- and resource destructors
668038ecc50SThomas Hellstrom * to be invoked.
669038ecc50SThomas Hellstrom */
vmw_validation_unref_lists(struct vmw_validation_context * ctx)670038ecc50SThomas Hellstrom void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
671038ecc50SThomas Hellstrom {
672fc18afcfSThomas Hellstrom struct vmw_validation_bo_node *entry;
673fc18afcfSThomas Hellstrom struct vmw_validation_res_node *val;
674038ecc50SThomas Hellstrom
6756034d9d4SThomas Zimmermann list_for_each_entry(entry, &ctx->bo_list, base.head) {
6766034d9d4SThomas Zimmermann ttm_bo_put(entry->base.bo);
6776034d9d4SThomas Zimmermann entry->base.bo = NULL;
6786034d9d4SThomas Zimmermann }
679038ecc50SThomas Hellstrom
680038ecc50SThomas Hellstrom list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
681fc18afcfSThomas Hellstrom list_for_each_entry(val, &ctx->resource_list, head)
682038ecc50SThomas Hellstrom vmw_resource_unreference(&val->res);
683038ecc50SThomas Hellstrom
684fc18afcfSThomas Hellstrom /*
685fc18afcfSThomas Hellstrom * No need to detach each list entry since they are all freed with
686fc18afcfSThomas Hellstrom * vmw_validation_free_mem. Just make the inaccessible.
687fc18afcfSThomas Hellstrom */
688fc18afcfSThomas Hellstrom INIT_LIST_HEAD(&ctx->bo_list);
689fc18afcfSThomas Hellstrom INIT_LIST_HEAD(&ctx->resource_list);
690fc18afcfSThomas Hellstrom
691fc18afcfSThomas Hellstrom vmw_validation_mem_free(ctx);
692038ecc50SThomas Hellstrom }
693038ecc50SThomas Hellstrom
694038ecc50SThomas Hellstrom /**
695038ecc50SThomas Hellstrom * vmw_validation_prepare - Prepare a validation context for command
696038ecc50SThomas Hellstrom * submission.
697038ecc50SThomas Hellstrom * @ctx: The validation context.
698038ecc50SThomas Hellstrom * @mutex: The mutex used to protect resource reservation.
699038ecc50SThomas Hellstrom * @intr: Whether to perform waits interruptible if possible.
700038ecc50SThomas Hellstrom *
701038ecc50SThomas Hellstrom * Note that the single reservation mutex @mutex is an unfortunate
702038ecc50SThomas Hellstrom * construct. Ideally resource reservation should be moved to per-resource
703038ecc50SThomas Hellstrom * ww_mutexes.
704038ecc50SThomas Hellstrom * If this functions doesn't return Zero to indicate success, all resources
705038ecc50SThomas Hellstrom * are left unreserved but still referenced.
706038ecc50SThomas Hellstrom * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
707038ecc50SThomas Hellstrom * on error.
708038ecc50SThomas Hellstrom */
vmw_validation_prepare(struct vmw_validation_context * ctx,struct mutex * mutex,bool intr)709038ecc50SThomas Hellstrom int vmw_validation_prepare(struct vmw_validation_context *ctx,
710038ecc50SThomas Hellstrom struct mutex *mutex,
711038ecc50SThomas Hellstrom bool intr)
712038ecc50SThomas Hellstrom {
713038ecc50SThomas Hellstrom int ret = 0;
714038ecc50SThomas Hellstrom
715038ecc50SThomas Hellstrom if (mutex) {
716038ecc50SThomas Hellstrom if (intr)
717038ecc50SThomas Hellstrom ret = mutex_lock_interruptible(mutex);
718038ecc50SThomas Hellstrom else
719038ecc50SThomas Hellstrom mutex_lock(mutex);
720038ecc50SThomas Hellstrom if (ret)
721038ecc50SThomas Hellstrom return -ERESTARTSYS;
722038ecc50SThomas Hellstrom }
723038ecc50SThomas Hellstrom
724038ecc50SThomas Hellstrom ctx->res_mutex = mutex;
725038ecc50SThomas Hellstrom ret = vmw_validation_res_reserve(ctx, intr);
726038ecc50SThomas Hellstrom if (ret)
727038ecc50SThomas Hellstrom goto out_no_res_reserve;
728038ecc50SThomas Hellstrom
729038ecc50SThomas Hellstrom ret = vmw_validation_bo_reserve(ctx, intr);
730038ecc50SThomas Hellstrom if (ret)
731038ecc50SThomas Hellstrom goto out_no_bo_reserve;
732038ecc50SThomas Hellstrom
733038ecc50SThomas Hellstrom ret = vmw_validation_bo_validate(ctx, intr);
734038ecc50SThomas Hellstrom if (ret)
735038ecc50SThomas Hellstrom goto out_no_validate;
736038ecc50SThomas Hellstrom
737038ecc50SThomas Hellstrom ret = vmw_validation_res_validate(ctx, intr);
738038ecc50SThomas Hellstrom if (ret)
739038ecc50SThomas Hellstrom goto out_no_validate;
740038ecc50SThomas Hellstrom
741038ecc50SThomas Hellstrom return 0;
742038ecc50SThomas Hellstrom
743038ecc50SThomas Hellstrom out_no_validate:
744038ecc50SThomas Hellstrom vmw_validation_bo_backoff(ctx);
745038ecc50SThomas Hellstrom out_no_bo_reserve:
746038ecc50SThomas Hellstrom vmw_validation_res_unreserve(ctx, true);
747038ecc50SThomas Hellstrom out_no_res_reserve:
748038ecc50SThomas Hellstrom if (mutex)
749038ecc50SThomas Hellstrom mutex_unlock(mutex);
750038ecc50SThomas Hellstrom
751038ecc50SThomas Hellstrom return ret;
752038ecc50SThomas Hellstrom }
753038ecc50SThomas Hellstrom
754038ecc50SThomas Hellstrom /**
755038ecc50SThomas Hellstrom * vmw_validation_revert - Revert validation actions if command submission
756038ecc50SThomas Hellstrom * failed.
757038ecc50SThomas Hellstrom *
758038ecc50SThomas Hellstrom * @ctx: The validation context.
759038ecc50SThomas Hellstrom *
760038ecc50SThomas Hellstrom * The caller still needs to unref resources after a call to this function.
761038ecc50SThomas Hellstrom */
vmw_validation_revert(struct vmw_validation_context * ctx)762038ecc50SThomas Hellstrom void vmw_validation_revert(struct vmw_validation_context *ctx)
763038ecc50SThomas Hellstrom {
764038ecc50SThomas Hellstrom vmw_validation_bo_backoff(ctx);
765038ecc50SThomas Hellstrom vmw_validation_res_unreserve(ctx, true);
766038ecc50SThomas Hellstrom if (ctx->res_mutex)
767038ecc50SThomas Hellstrom mutex_unlock(ctx->res_mutex);
768fc18afcfSThomas Hellstrom vmw_validation_unref_lists(ctx);
769038ecc50SThomas Hellstrom }
770038ecc50SThomas Hellstrom
771038ecc50SThomas Hellstrom /**
7722cd80dbdSZack Rusin * vmw_validation_done - Commit validation actions after command submission
773038ecc50SThomas Hellstrom * success.
774038ecc50SThomas Hellstrom * @ctx: The validation context.
775038ecc50SThomas Hellstrom * @fence: Fence with which to fence all buffer objects taking part in the
776038ecc50SThomas Hellstrom * command submission.
777038ecc50SThomas Hellstrom *
778038ecc50SThomas Hellstrom * The caller does NOT need to unref resources after a call to this function.
779038ecc50SThomas Hellstrom */
vmw_validation_done(struct vmw_validation_context * ctx,struct vmw_fence_obj * fence)780038ecc50SThomas Hellstrom void vmw_validation_done(struct vmw_validation_context *ctx,
781038ecc50SThomas Hellstrom struct vmw_fence_obj *fence)
782038ecc50SThomas Hellstrom {
783038ecc50SThomas Hellstrom vmw_validation_bo_fence(ctx, fence);
784038ecc50SThomas Hellstrom vmw_validation_res_unreserve(ctx, false);
785038ecc50SThomas Hellstrom if (ctx->res_mutex)
786038ecc50SThomas Hellstrom mutex_unlock(ctx->res_mutex);
787038ecc50SThomas Hellstrom vmw_validation_unref_lists(ctx);
788038ecc50SThomas Hellstrom }
78964ad2abfSThomas Hellstrom
79064ad2abfSThomas Hellstrom /**
79164ad2abfSThomas Hellstrom * vmw_validation_preload_bo - Preload the validation memory allocator for a
79264ad2abfSThomas Hellstrom * call to vmw_validation_add_bo().
79364ad2abfSThomas Hellstrom * @ctx: Pointer to the validation context.
79464ad2abfSThomas Hellstrom *
79564ad2abfSThomas Hellstrom * Iff this function returns successfully, the next call to
79664ad2abfSThomas Hellstrom * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
79764ad2abfSThomas Hellstrom * but voids the guarantee.
79864ad2abfSThomas Hellstrom *
79964ad2abfSThomas Hellstrom * Returns: Zero if successful, %-EINVAL otherwise.
80064ad2abfSThomas Hellstrom */
vmw_validation_preload_bo(struct vmw_validation_context * ctx)80164ad2abfSThomas Hellstrom int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
80264ad2abfSThomas Hellstrom {
80364ad2abfSThomas Hellstrom unsigned int size = sizeof(struct vmw_validation_bo_node);
80464ad2abfSThomas Hellstrom
80564ad2abfSThomas Hellstrom if (!vmw_validation_mem_alloc(ctx, size))
80664ad2abfSThomas Hellstrom return -ENOMEM;
80764ad2abfSThomas Hellstrom
80864ad2abfSThomas Hellstrom ctx->mem_size_left += size;
80964ad2abfSThomas Hellstrom return 0;
81064ad2abfSThomas Hellstrom }
81164ad2abfSThomas Hellstrom
81264ad2abfSThomas Hellstrom /**
81364ad2abfSThomas Hellstrom * vmw_validation_preload_res - Preload the validation memory allocator for a
81464ad2abfSThomas Hellstrom * call to vmw_validation_add_res().
81564ad2abfSThomas Hellstrom * @ctx: Pointer to the validation context.
81664ad2abfSThomas Hellstrom * @size: Size of the validation node extra data. See below.
81764ad2abfSThomas Hellstrom *
81864ad2abfSThomas Hellstrom * Iff this function returns successfully, the next call to
81964ad2abfSThomas Hellstrom * vmw_validation_add_res() with the same or smaller @size is guaranteed not to
82064ad2abfSThomas Hellstrom * sleep. An error is not fatal but voids the guarantee.
82164ad2abfSThomas Hellstrom *
82264ad2abfSThomas Hellstrom * Returns: Zero if successful, %-EINVAL otherwise.
82364ad2abfSThomas Hellstrom */
vmw_validation_preload_res(struct vmw_validation_context * ctx,unsigned int size)82464ad2abfSThomas Hellstrom int vmw_validation_preload_res(struct vmw_validation_context *ctx,
82564ad2abfSThomas Hellstrom unsigned int size)
82664ad2abfSThomas Hellstrom {
82764ad2abfSThomas Hellstrom size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
82864ad2abfSThomas Hellstrom size) +
82964ad2abfSThomas Hellstrom vmw_validation_align(sizeof(struct vmw_validation_bo_node));
83064ad2abfSThomas Hellstrom if (!vmw_validation_mem_alloc(ctx, size))
83164ad2abfSThomas Hellstrom return -ENOMEM;
83264ad2abfSThomas Hellstrom
83364ad2abfSThomas Hellstrom ctx->mem_size_left += size;
83464ad2abfSThomas Hellstrom return 0;
83564ad2abfSThomas Hellstrom }
836b7468b15SThomas Hellstrom
837b7468b15SThomas Hellstrom /**
838b7468b15SThomas Hellstrom * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
839b7468b15SThomas Hellstrom * validation context
840b7468b15SThomas Hellstrom * @ctx: The validation context
841b7468b15SThomas Hellstrom *
842b7468b15SThomas Hellstrom * This function unreserves the buffer objects previously reserved using
843b7468b15SThomas Hellstrom * vmw_validation_bo_reserve. It's typically used as part of an error path
844b7468b15SThomas Hellstrom */
vmw_validation_bo_backoff(struct vmw_validation_context * ctx)845b7468b15SThomas Hellstrom void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
846b7468b15SThomas Hellstrom {
847b7468b15SThomas Hellstrom struct vmw_validation_bo_node *entry;
848b7468b15SThomas Hellstrom
849b7468b15SThomas Hellstrom /*
850b7468b15SThomas Hellstrom * Switching coherent resource backup buffers failed.
851b7468b15SThomas Hellstrom * Release corresponding buffer object dirty trackers.
852b7468b15SThomas Hellstrom */
853b7468b15SThomas Hellstrom list_for_each_entry(entry, &ctx->bo_list, base.head) {
854b7468b15SThomas Hellstrom if (entry->coherent_count) {
855b7468b15SThomas Hellstrom unsigned int coherent_count = entry->coherent_count;
856*668b2066SZack Rusin struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
857b7468b15SThomas Hellstrom
858b7468b15SThomas Hellstrom while (coherent_count--)
859b7468b15SThomas Hellstrom vmw_bo_dirty_release(vbo);
860b7468b15SThomas Hellstrom }
861b7468b15SThomas Hellstrom }
862b7468b15SThomas Hellstrom
863b7468b15SThomas Hellstrom ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
864b7468b15SThomas Hellstrom }
865