xref: /openbmc/linux/drivers/gpu/drm/i915/i915_deps.c (revision a3185f91)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/dma-fence.h>
#include <linux/slab.h>

#include <drm/ttm/ttm_bo.h>

#include "i915_deps.h"

/**
 * DOC: Set of utilities to dynamically collect dependencies into a
 * structure which is fed into the GT migration code.
 *
 * Once we can do async unbinding, this is also needed to coalesce
 * the migration fence with the unbind fences if these are coalesced
 * post-migration.
 *
 * While collecting the individual dependencies, we store the refcounted
 * struct dma_fence pointers in a realloc-managed pointer array, since
 * that can be easily fed into a dma_fence_array. Other options are
 * available, like for example an xarray for similarity with drm/sched.
 * Can be changed easily if needed.
 *
 * A struct i915_deps needs to be initialized using i915_deps_init().
 * If i915_deps_add_dependency() or i915_deps_add_resv() return an
 * error code, they will internally call i915_deps_fini(), which frees
 * all internal references and allocations.
 */
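
/*
 * Illustrative usage sketch only; the exact call sites live in the TTM
 * move code and may differ, and the object and context names below are
 * hypothetical. The gfp mode is the caller's choice.
 *
 *	struct i915_deps deps;
 *	int ret;
 *
 *	i915_deps_init(&deps, GFP_KERNEL);
 *	ret = i915_deps_add_resv(&deps, obj->base.resv, &ctx);
 *	if (ret)
 *		return ret;	// deps has already been finalized on error
 *	// ... hand &deps to the migration code, then ...
 *	i915_deps_fini(&deps);
 */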

/* Min number of fence pointers in the array when an allocation occurs. */
#define I915_DEPS_MIN_ALLOC_CHUNK 8U

static void i915_deps_reset_fences(struct i915_deps *deps)
{
	if (deps->fences != &deps->single)
		kfree(deps->fences);
	deps->num_deps = 0;
	deps->fences_size = 1;
	deps->fences = &deps->single;
}

/**
 * i915_deps_init - Initialize an i915_deps structure
 * @deps: Pointer to the i915_deps structure to initialize.
 * @gfp: The allocation mode for subsequent allocations.
 */
void i915_deps_init(struct i915_deps *deps, gfp_t gfp)
{
	deps->fences = NULL;
	deps->gfp = gfp;
	i915_deps_reset_fences(deps);
}

/**
 * i915_deps_fini - Finalize an i915_deps structure
 * @deps: Pointer to the i915_deps structure to finalize.
 *
 * This function drops all fence references taken, conditionally frees and
 * then resets the fences array.
 */
void i915_deps_fini(struct i915_deps *deps)
{
	unsigned int i;

	for (i = 0; i < deps->num_deps; ++i)
		dma_fence_put(deps->fences[i]);

	if (deps->fences != &deps->single)
		kfree(deps->fences);
}

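/*
 * Grow the fence array by reallocation, doubling its size with a minimum of
 * I915_DEPS_MIN_ALLOC_CHUNK entries. If the allocation fails, fall back to
 * handling @fence synchronously according to @ctx instead of storing it, so
 * the dependency is still honoured; if that synchronous wait fails, the
 * whole collection is finalized and the error returned.
 */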
static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence,
			  const struct ttm_operation_ctx *ctx)
{
	int ret;

	if (deps->num_deps >= deps->fences_size) {
		unsigned int new_size = 2 * deps->fences_size;
		struct dma_fence **new_fences;

		new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK);
		new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp);
		if (!new_fences)
			goto sync;

		memcpy(new_fences, deps->fences,
		       deps->fences_size * sizeof(*new_fences));
		swap(new_fences, deps->fences);
		if (new_fences != &deps->single)
			kfree(new_fences);
		deps->fences_size = new_size;
	}
	deps->fences[deps->num_deps++] = dma_fence_get(fence);
	return 0;

sync:
	if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) {
		ret = -EBUSY;
		goto unref;
	}

	ret = dma_fence_wait(fence, ctx->interruptible);
	if (ret)
		goto unref;

	ret = fence->error;
	if (ret)
		goto unref;

	return 0;

unref:
	i915_deps_fini(deps);
	return ret;
}

/**
 * i915_deps_sync - Wait for all the fences in the dependency collection
 * @deps: Pointer to the i915_deps structure the fences of which to wait for.
 * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits
 * should be performed.
 *
 * This function waits for fences in the dependency collection. If it
 * encounters an error during the wait or a fence error, the wait for
 * further fences is aborted and the error returned.
 *
 * Return: Zero if successful. Negative error code on error.
 */
int i915_deps_sync(const struct i915_deps *deps, const struct ttm_operation_ctx *ctx)
{
	struct dma_fence **fences = deps->fences;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < deps->num_deps; ++i, ++fences) {
		if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) {
			ret = -EBUSY;
			break;
		}

		ret = dma_fence_wait(*fences, ctx->interruptible);
		if (!ret)
			ret = (*fences)->error;
		if (ret)
			break;
	}

	return ret;
}
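
/*
 * Example (illustrative sketch only; caller names are hypothetical):
 * wait for everything collected so far, sleeping interruptibly:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int err = i915_deps_sync(&deps, &ctx);
 *
 * With ctx.no_wait_gpu set, an unsignaled fence makes this return -EBUSY
 * instead of blocking.
 */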

/**
 * i915_deps_add_dependency - Add a fence to the dependency collection
 * @deps: Pointer to the i915_deps structure a fence is to be added to.
 * @fence: The fence to add.
 * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
 * be performed if waiting.
 *
 * Adds a fence to the dependency collection, and takes a reference on it.
 * If the fence context is not zero and there was a later fence from the
 * same fence context already added, then the fence is not added to the
 * dependency collection. If the fence context is not zero and there was
 * an earlier fence already added, then the fence will replace the older
 * fence from the same context and the reference on the earlier fence will
 * be dropped.
 * If there is a failure to allocate memory to accommodate the new fence to
 * be added, the new fence will instead be waited for and an error may
 * be returned, depending on the value of @ctx or whether there was a fence
 * error. If an error was returned, the dependency collection will be
 * finalized and all fence references dropped.
 *
 * Return: 0 on success. Negative error code on error.
 */
int i915_deps_add_dependency(struct i915_deps *deps,
			     struct dma_fence *fence,
			     const struct ttm_operation_ctx *ctx)
{
	unsigned int i;
	int ret;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence)) {
		ret = fence->error;
		if (ret)
			i915_deps_fini(deps);
		return ret;
	}

	for (i = 0; i < deps->num_deps; ++i) {
		struct dma_fence *entry = deps->fences[i];

		if (!entry->context || entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			deps->fences[i] = dma_fence_get(fence);
		}

		return 0;
	}

	return i915_deps_grow(deps, fence, ctx);
}
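
/*
 * Illustrative sketch only (hypothetical unsignaled fences @fa and @fb on
 * the same timeline, with @fb later than @fa):
 *
 *	i915_deps_add_dependency(&deps, fa, &ctx);	// fa stored
 *	i915_deps_add_dependency(&deps, fb, &ctx);	// fa dropped, fb stored
 *	i915_deps_add_dependency(&deps, fa, &ctx);	// no-op, fb is retained
 */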

/**
 * i915_deps_add_resv - Add the fences of a reservation object to a dependency
 * collection.
 * @deps: Pointer to the i915_deps structure a fence is to be added to.
 * @resv: The reservation object, the fences of which to add.
 * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
 * be performed if waiting.
 *
 * Calls i915_deps_add_dependency() on the indicated fences of @resv.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
		       const struct ttm_operation_ctx *ctx)
{
	struct dma_resv_iter iter;
	struct dma_fence *fence;

	dma_resv_assert_held(resv);
	dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {
		int ret = i915_deps_add_dependency(deps, fence, ctx);

		if (ret)
			return ret;
	}

	return 0;
}
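
/*
 * Example (illustrative sketch only; object and context names are
 * hypothetical). The reservation object must be locked while its fences
 * are collected, e.g.:
 *
 *	ret = dma_resv_lock(obj->base.resv, NULL);
 *	if (!ret) {
 *		ret = i915_deps_add_resv(&deps, obj->base.resv, &ctx);
 *		dma_resv_unlock(obj->base.resv);
 *	}
 */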