/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after the last bo is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that's not the case, we skip over the bo after last to the
 * closest completed bo, if one exists. If none exists and we are not
 * asked to block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring
 * and return as soon as any of those fences completes.
 */
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

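/**
 * amdgpu_sa_bo_manager_init - set up a suballocation manager
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer in bytes
 * @align: alignment of the backing buffer and upper bound for
 *	suballocation alignment
 * @domain: memory domain to place the backing buffer in
 *
 * Initializes the hole pointer and the per-ring fence lists and
 * allocates the backing buffer object.
 * Returns 0 on success, negative error code on failure.
 */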
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = amdgpu_bo_create(adev, size, align, true,
			     domain, 0, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return 0;
}

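/**
 * amdgpu_sa_bo_manager_fini - tear down a suballocation manager
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to tear down
 *
 * Reclaims whatever suballocations have completed, complains if any
 * are still active, removes the remaining ones anyway and drops the
 * reference on the backing buffer object.
 */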
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

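/**
 * amdgpu_sa_bo_manager_start - make a suballocation manager usable
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to start
 *
 * Pins the backing buffer object into its domain and maps it into the
 * CPU address space, filling in gpu_addr and cpu_ptr.
 * Returns 0 on success, negative error code on failure.
 */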
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

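/**
 * amdgpu_sa_bo_manager_suspend - prepare a suballocation manager for suspend
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to suspend
 *
 * Unmaps and unpins the backing buffer object so it can be evicted
 * while the device is suspended.
 * Returns 0 on success, negative error code on failure.
 */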
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

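/*
 * Look up the ring a fence belongs to, so a suballocation can be
 * tracked on the matching per-ring fence list. Handles both scheduler
 * fences and plain amdgpu fences; defaults to ring 0 for anything
 * else.
 */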
static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
{
	struct amdgpu_fence *a_fence;
	struct amd_sched_fence *s_fence;

	s_fence = to_amd_sched_fence(f);
	if (s_fence)
		return s_fence->entity->scheduler->ring_id;
	a_fence = to_amdgpu_fence(f);
	if (a_fence)
		return a_fence->ring->idx;
	return 0;
}

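/*
 * Unlink a suballocation from the ordered and fence lists, drop its
 * fence reference and free it, moving the hole back if it pointed at
 * this entry. Must be called with the manager's wq.lock held.
 */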
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	fence_put(sa_bo->fence);
	kfree(sa_bo);
}

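/*
 * Walk forward from the hole and reclaim every consecutive
 * suballocation whose fence has already signaled, stopping at the
 * first one that is still in use. Must be called with the manager's
 * wq.lock held.
 */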
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

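/*
 * The hole is described by the two helpers below: it starts at the
 * end offset of the entry the hole pointer references (or 0 when the
 * pointer is at the list head) and ends at the start offset of the
 * following entry (or at the end of the buffer).
 */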
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

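/*
 * Try to carve @size bytes aligned to @align out of the current hole.
 * On success the new suballocation is linked in right after the hole
 * and becomes the new hole position.
 */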
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if we either have a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

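/*
 * Move the hole to the next usable position: either wrap around to
 * the start of the buffer, or reclaim the closest signaled
 * suballocation after the hole. Fences that still block progress are
 * collected in @fences so the caller can wait on them; @tries caps
 * how often each ring may be skipped over. Returns true if the
 * caller should retry the allocation.
 */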
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);

		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

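/**
 * amdgpu_sa_bo_new - allocate a suballocation
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to allocate from
 * @sa_bo: pointer to the resulting suballocation, set to NULL on failure
 * @size: size of the allocation in bytes
 * @align: alignment of the allocation, must not exceed the manager's align
 *
 * Tries the fast path of allocating right behind the hole, otherwise
 * reclaims signaled suballocations and advances the hole, and as a
 * last resort waits for the oldest fences or blocks on the waitqueue
 * until space becomes available.
 * Returns 0 on success, negative error code on failure.
 *
 * A minimal usage sketch; the manager variable, fence and sizes here
 * are illustrative assumptions, not taken from this file:
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(adev, sa_manager, &sa_bo, 256, 256);
 *	if (r)
 *		return r;
 *	... let the GPU use the range, then hand in the protecting fence ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */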
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
		     struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
	int i, r;
	signed long t;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		spin_unlock(&sa_manager->wq.lock);
		t = amdgpu_fence_wait_multiple(adev, fences, AMDGPU_MAX_RINGS,
					       false, false,
					       MAX_SCHEDULE_TIMEOUT);
		r = (t > 0) ? 0 : t;
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for block */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

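/**
 * amdgpu_sa_bo_free - free a suballocation
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_bo: suballocation to free, set to NULL on return
 * @fence: fence protecting the allocation, or NULL if the GPU is
 *	already done with it
 *
 * If the fence has not signaled yet, the suballocation is queued on
 * the fence list of its ring and reclaimed later; otherwise it is
 * removed immediately. Waiters on the manager's waitqueue are woken
 * up either way.
 */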
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = fence_get(fence);
		idx = amdgpu_sa_get_ring_from_fence(fence);
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
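/*
 * Dump the state of the manager to debugfs: one line per
 * suballocation with its GPU address range and size, '>' marking the
 * current hole, and the protecting fence if there is one.
 */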
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);

			if (a_fence)
				seq_printf(m, " protected by 0x%016llx on ring %d",
					   a_fence->seq, a_fence->ring->idx);
			if (s_fence)
				seq_printf(m, " protected by 0x%016llx on ring %d",
					   s_fence->v_seq,
					   s_fence->entity->scheduler->ring_id);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif