/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole" and always try to allocate
 * right after it. The principle is that in a linear GPU ring
 * progression, what comes after the last allocation is the oldest bo
 * we allocated and thus the first one that should no longer be in use
 * by the GPU.
 *
 * If that's not the case, we skip over the bo after the hole to the
 * closest finished bo, if one exists. If none exists and we are not
 * asked to block, we report failure to allocate.
 *
 * If we are asked to block, we collect the oldest fence of each ring
 * and wait for any one of them to complete.
 */
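/*
 * Illustrative usage (a sketch only: the manager instance name
 * "adev->ring_tmp_bo", the sizes and the fence handling below are
 * assumptions for illustration, not necessarily the real call sites):
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, &sa_bo, 1024, 256);
 *	if (r)
 *		return r;
 *
 * The returned range is [sa_bo->soffset, sa_bo->eoffset) inside the
 * manager's bo; add sa_manager->gpu_addr or cpu_ptr to address it.
 * Once the GPU work using the range is submitted, hand it back
 * protected by that work's fence:
 *
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */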
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

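/**
 * amdgpu_sa_bo_manager_init - initialize a suballocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer in bytes
 * @align: maximum alignment suballocations may request
 * @domain: memory domain to place the backing bo in
 *
 * Initializes the ordered list and the per ring fence lists and
 * allocates the backing bo. Returns 0 on success, negative error
 * code otherwise.
 */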
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = amdgpu_bo_create(adev, size, align, true,
			     domain, 0, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

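/**
 * amdgpu_sa_bo_manager_fini - tear down a suballocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to tear down
 *
 * Frees all remaining suballocations, drops the reference on the
 * backing bo and resets the manager size. Complains if allocations
 * that are still in flight have to be cleared anyway.
 */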
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

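/**
 * amdgpu_sa_bo_manager_start - make the suballocator usable
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to start
 *
 * Pins the backing bo into its domain and maps it into the CPU
 * address space. Returns 0 on success, negative error code otherwise.
 */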
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

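/**
 * amdgpu_sa_bo_manager_suspend - prepare the suballocator for suspend
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to suspend
 *
 * Unmaps and unpins the backing bo so it can be evicted.
 * Returns 0 on success, negative error code otherwise.
 */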
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

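/**
 * amdgpu_sa_bo_remove_locked - free a single suballocation
 *
 * @sa_bo: suballocation to free
 *
 * Removes @sa_bo from the ordered and fence lists, drops its fence
 * reference and frees it. Moves the hole back if it pointed to this
 * allocation. Caller must hold the manager's wq.lock.
 */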
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	amdgpu_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

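/**
 * amdgpu_sa_bo_try_free - free as many suballocations as possible
 *
 * @sa_manager: manager to operate on
 *
 * Walks the ordered list starting right after the hole and frees
 * every suballocation whose fence has signaled, stopping at the
 * first one that is still in use. Caller must hold wq.lock.
 */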
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !fence_is_signaled(&sa_bo->fence->base)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

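/**
 * amdgpu_sa_bo_hole_soffset - start offset of the current hole
 *
 * @sa_manager: manager to query
 *
 * The hole starts where the allocation it points to ends, or at
 * offset zero if the hole is the list head itself.
 */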
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

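/**
 * amdgpu_sa_bo_hole_eoffset - end offset of the current hole
 *
 * @sa_manager: manager to query
 *
 * The hole ends where the next allocation starts, or at the end of
 * the buffer if there is no allocation after it.
 */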
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

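/**
 * amdgpu_sa_bo_try_alloc - try to suballocate from the current hole
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: preallocated suballocation to fill in
 * @size: number of bytes to allocate
 * @align: alignment the allocation must match
 *
 * If the hole is big enough for @size bytes at @align alignment,
 * fills in @sa_bo, inserts it after the hole and moves the hole
 * behind it. Returns true on success, false if the hole is too
 * small. Caller must hold wq.lock.
 */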
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

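/**
 * amdgpu_sa_bo_next_hole - advance the hole past finished allocations
 *
 * @sa_manager: manager to operate on
 * @fences: array that receives the oldest unsignaled fence per ring
 * @tries: per ring count of how often we already skipped over it
 *
 * Either wraps the hole around to the beginning of the buffer, or
 * frees the closest signaled allocation after the hole and moves the
 * hole there. Returns true if the caller should retry the allocation,
 * false if there is nothing more to do without waiting. Caller must
 * hold wq.lock.
 */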
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if the hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again from the beginning of the buffer */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest after the current hole
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!fence_is_signaled(&sa_bo->fence->base)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring->idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

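/**
 * amdgpu_sa_bo_new - allocate a new suballocation
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to allocate from
 * @sa_bo: pointer that receives the new suballocation
 * @size: number of bytes to allocate
 * @align: alignment the allocation must match
 *
 * Tries to allocate from the current hole, freeing and skipping over
 * finished allocations as needed. If nothing can be freed, waits for
 * the collected fences, or blocks on the wait queue until space
 * becomes available. Returns 0 on success, negative error code
 * otherwise.
 */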
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
		     struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
	int i, r;
	signed long t;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		spin_unlock(&sa_manager->wq.lock);
		t = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
		r = (t > 0) ? 0 : t;
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

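/**
 * amdgpu_sa_bo_free - free a suballocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: suballocation to free, set to NULL on return
 * @fence: fence that still protects the suballocation, or NULL
 *
 * If @fence has not signaled yet, the suballocation is queued on the
 * fence list of its ring and freed later; otherwise it is removed
 * immediately. Wakes up everybody waiting for space.
 */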
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct amdgpu_fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !fence_is_signaled(&fence->base)) {
		(*sa_bo)->fence = amdgpu_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring->idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
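/**
 * amdgpu_sa_bo_dump_debug_info - dump all suballocations via debugfs
 *
 * @sa_manager: manager to dump
 * @m: seq_file to print into
 *
 * Prints one line per suballocation with its GPU address range, size
 * and protecting fence; the current hole is marked with '>'.
 */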
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring->idx);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif