/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
};

static struct kmem_cache *amdgpu_sync_slab;
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

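/*
 * Typical lifecycle of a sync object (a minimal sketch, not code taken from
 * a caller of this file; adev, resv and owner are placeholders supplied by
 * the caller):
 *
 *	struct amdgpu_sync sync;
 *	struct dma_fence *fence;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, owner, false);
 *	...
 *	fence = amdgpu_sync_get_fence(&sync);
 *	... wait on or schedule against the fence ...
 *	dma_fence_put(fence);
 *	amdgpu_sync_free(&sync);
 */
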
/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

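	/* remember the most recent VM update fence issued by this device */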
	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f))
		return 0;

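	/* no entry for this fence context yet, create a new one */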
	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner of the planned job submission
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to the exclusive fence and, unless @explicit_sync is set, to the
 * relevant shared fences of the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner, bool explicit_sync)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	if (explicit_sync)
		return r;

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fence from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object that has not been
 * signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

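		/* the entry is always removed, but only a not yet signaled
		 * fence is handed back to the caller
		 */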
		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

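/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait for
 * @intr: if true, the wait can be interrupted by a signal
 *
 * Wait for every fence in the sync object and drop the entries that have
 * completed. Returns 0 on success or the error from dma_fence_wait().
 */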
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}