/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
	bool	explicit;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
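
/*
 * Illustrative sketch (not part of this file): typical lifecycle of a sync
 * object. "adev" and "resv" are hypothetical stand-ins for the caller's
 * device and reservation object.
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv,
 *			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */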

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: true if the fence is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync,
				  struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve the explicit flag to not lose pipeline syncs */
		e->explicit |= explicit;

		return true;
	}
	return false;
}
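
/*
 * Illustrative sketch (hypothetical fences f1 and f2 from the same fence
 * context, with f2 emitted after f1): because entries are hashed by fence
 * context, the second add only updates the existing entry, so the sync
 * object ends up holding just the later fence f2.
 *
 *	amdgpu_sync_fence(adev, &sync, f1, false);
 *	amdgpu_sync_fence(adev, &sync, f2, false);
 */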

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: true if the fence is an explicit dependency
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;
	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
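
/*
 * Illustrative sketch (hypothetical "fence" taken from a user supplied
 * dependency): passing explicit = true marks the entry as an explicit
 * dependency, and amdgpu_sync_add_later() preserves that flag if the fence
 * context is already tracked.
 *
 *	r = amdgpu_sync_fence(adev, &sync, fence, true);
 *	if (r)
 *		return r;
 */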

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner for which the fences are synced, used to decide which
 *	fences to skip
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to all relevant fences in the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner, bool explicit_sync)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f, false);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		fence_owner = amdgpu_sync_get_owner(f);
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner, or any fence
			 * when explicit sync was requested, as long as the
			 * owner isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    (fence_owner == owner || explicit_sync))
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f, false);
		if (r)
			break;
	}
	return r;
}
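
/*
 * Illustrative sketch (hypothetical buffer object "bo" belonging to the
 * caller's VM; the resv pointer location is an assumption about the BO
 * layout): collect the fences a VM update has to wait for. KFD eviction
 * fences are skipped automatically because the owner is not
 * AMDGPU_FENCE_OWNER_UNDEFINED.
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *			     AMDGPU_FENCE_OWNER_VM, false);
 *	if (r)
 *		return r;
 */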

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}
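
/*
 * Illustrative sketch (hypothetical submission helper): check whether
 * anything still blocks execution on "ring" without removing the entry
 * from the sync object.
 *
 *	struct dma_fence *blocker = amdgpu_sync_peek_fence(&sync, ring);
 *
 *	if (blocker)
 *		return blocker;
 */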

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: optional output, set to true if the returned fence is an
 *	explicit dependency
 *
 * Gets and removes the next not yet signaled fence from the sync object.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}
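
/*
 * Illustrative sketch (hypothetical dependency loop): drain all unsignaled
 * fences from the sync object. Each returned fence carries a reference that
 * the caller has to drop with dma_fence_put().
 *
 *	struct dma_fence *f;
 *	bool explicit;
 *
 *	while ((f = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		... wait on f or hand it to the scheduler ...
 *		dma_fence_put(f);
 *	}
 */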

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}

/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait for
 * @intr: if true, do an interruptible wait
 *
 * Wait for all fences in the sync object to signal, freeing the entries as
 * they complete.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}
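
/*
 * Illustrative sketch: interruptibly wait on the CPU for everything
 * collected so far before touching the protected resource directly.
 *
 *	r = amdgpu_sync_wait(&sync, true);
 *	if (r)
 *		return r;
 */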

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}