1d38ceaf9SAlex Deucher /*
2d38ceaf9SAlex Deucher  * Copyright 2011 Red Hat Inc.
3d38ceaf9SAlex Deucher  * All Rights Reserved.
4d38ceaf9SAlex Deucher  *
5d38ceaf9SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6d38ceaf9SAlex Deucher  * copy of this software and associated documentation files (the
7d38ceaf9SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8d38ceaf9SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9d38ceaf9SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10d38ceaf9SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11d38ceaf9SAlex Deucher  * the following conditions:
12d38ceaf9SAlex Deucher  *
13d38ceaf9SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14d38ceaf9SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15d38ceaf9SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16d38ceaf9SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17d38ceaf9SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18d38ceaf9SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19d38ceaf9SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20d38ceaf9SAlex Deucher  *
21d38ceaf9SAlex Deucher  * The above copyright notice and this permission notice (including the
22d38ceaf9SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23d38ceaf9SAlex Deucher  * of the Software.
24d38ceaf9SAlex Deucher  *
25d38ceaf9SAlex Deucher  */
26d38ceaf9SAlex Deucher /*
27d38ceaf9SAlex Deucher  * Authors:
28d38ceaf9SAlex Deucher  *    Jerome Glisse <glisse@freedesktop.org>
29d38ceaf9SAlex Deucher  */
30d38ceaf9SAlex Deucher /* Algorithm:
31d38ceaf9SAlex Deucher  *
32d38ceaf9SAlex Deucher  * We store the last allocated bo in "hole", we always try to allocate
33d38ceaf9SAlex Deucher  * after the last allocated bo. Principle is that in a linear GPU ring
 * progression, what is after last is the oldest bo we allocated and thus
35d38ceaf9SAlex Deucher  * the first one that should no longer be in use by the GPU.
36d38ceaf9SAlex Deucher  *
37d38ceaf9SAlex Deucher  * If it's not the case we skip over the bo after last to the closest
38d38ceaf9SAlex Deucher  * done bo if such one exist. If none exist and we are not asked to
39d38ceaf9SAlex Deucher  * block we report failure to allocate.
40d38ceaf9SAlex Deucher  *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
43d38ceaf9SAlex Deucher  */
44d38ceaf9SAlex Deucher #include <drm/drmP.h>
45d38ceaf9SAlex Deucher #include "amdgpu.h"
46d38ceaf9SAlex Deucher 
47d38ceaf9SAlex Deucher static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
48d38ceaf9SAlex Deucher static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
49d38ceaf9SAlex Deucher 
50d38ceaf9SAlex Deucher int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
51d38ceaf9SAlex Deucher 			      struct amdgpu_sa_manager *sa_manager,
52d38ceaf9SAlex Deucher 			      unsigned size, u32 align, u32 domain)
53d38ceaf9SAlex Deucher {
54d38ceaf9SAlex Deucher 	int i, r;
55d38ceaf9SAlex Deucher 
56d38ceaf9SAlex Deucher 	init_waitqueue_head(&sa_manager->wq);
57d38ceaf9SAlex Deucher 	sa_manager->bo = NULL;
58d38ceaf9SAlex Deucher 	sa_manager->size = size;
59d38ceaf9SAlex Deucher 	sa_manager->domain = domain;
60d38ceaf9SAlex Deucher 	sa_manager->align = align;
61d38ceaf9SAlex Deucher 	sa_manager->hole = &sa_manager->olist;
62d38ceaf9SAlex Deucher 	INIT_LIST_HEAD(&sa_manager->olist);
63d38ceaf9SAlex Deucher 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
64d38ceaf9SAlex Deucher 		INIT_LIST_HEAD(&sa_manager->flist[i]);
65d38ceaf9SAlex Deucher 	}
66d38ceaf9SAlex Deucher 
6772d7668bSChristian König 	r = amdgpu_bo_create(adev, size, align, true, domain,
6872d7668bSChristian König 			     0, NULL, NULL, &sa_manager->bo);
69d38ceaf9SAlex Deucher 	if (r) {
70d38ceaf9SAlex Deucher 		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
71d38ceaf9SAlex Deucher 		return r;
72d38ceaf9SAlex Deucher 	}
73d38ceaf9SAlex Deucher 
74d38ceaf9SAlex Deucher 	return r;
75d38ceaf9SAlex Deucher }
76d38ceaf9SAlex Deucher 
77d38ceaf9SAlex Deucher void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
78d38ceaf9SAlex Deucher 			       struct amdgpu_sa_manager *sa_manager)
79d38ceaf9SAlex Deucher {
80d38ceaf9SAlex Deucher 	struct amdgpu_sa_bo *sa_bo, *tmp;
81d38ceaf9SAlex Deucher 
82d38ceaf9SAlex Deucher 	if (!list_empty(&sa_manager->olist)) {
83d38ceaf9SAlex Deucher 		sa_manager->hole = &sa_manager->olist,
84d38ceaf9SAlex Deucher 		amdgpu_sa_bo_try_free(sa_manager);
85d38ceaf9SAlex Deucher 		if (!list_empty(&sa_manager->olist)) {
86d38ceaf9SAlex Deucher 			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
87d38ceaf9SAlex Deucher 		}
88d38ceaf9SAlex Deucher 	}
89d38ceaf9SAlex Deucher 	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
90d38ceaf9SAlex Deucher 		amdgpu_sa_bo_remove_locked(sa_bo);
91d38ceaf9SAlex Deucher 	}
92d38ceaf9SAlex Deucher 	amdgpu_bo_unref(&sa_manager->bo);
93d38ceaf9SAlex Deucher 	sa_manager->size = 0;
94d38ceaf9SAlex Deucher }
95d38ceaf9SAlex Deucher 
/* Make the manager usable: pin the backing buffer into its domain
 * (filling sa_manager->gpu_addr) and kmap it (filling sa_manager->cpu_ptr).
 * Returns 0 on success or a negative error code.
 */
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	/* kmap result is returned directly; unreserve either way */
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}
122d38ceaf9SAlex Deucher 
123d38ceaf9SAlex Deucher int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
124d38ceaf9SAlex Deucher 				 struct amdgpu_sa_manager *sa_manager)
125d38ceaf9SAlex Deucher {
126d38ceaf9SAlex Deucher 	int r;
127d38ceaf9SAlex Deucher 
128d38ceaf9SAlex Deucher 	if (sa_manager->bo == NULL) {
129d38ceaf9SAlex Deucher 		dev_err(adev->dev, "no bo for sa manager\n");
130d38ceaf9SAlex Deucher 		return -EINVAL;
131d38ceaf9SAlex Deucher 	}
132d38ceaf9SAlex Deucher 
133d38ceaf9SAlex Deucher 	r = amdgpu_bo_reserve(sa_manager->bo, false);
134d38ceaf9SAlex Deucher 	if (!r) {
135d38ceaf9SAlex Deucher 		amdgpu_bo_kunmap(sa_manager->bo);
136d38ceaf9SAlex Deucher 		amdgpu_bo_unpin(sa_manager->bo);
137d38ceaf9SAlex Deucher 		amdgpu_bo_unreserve(sa_manager->bo);
138d38ceaf9SAlex Deucher 	}
139d38ceaf9SAlex Deucher 	return r;
140d38ceaf9SAlex Deucher }
141d38ceaf9SAlex Deucher 
/* Unlink a sub-allocation from the offset and fence lists, drop its fence
 * reference and free it.  Normally called with sa_manager->wq.lock held
 * (the manager teardown path is the exception).
 */
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
	/* if the hole currently ends at this bo, retreat it to the previous
	 * entry so it stays valid once the bo is removed */
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	fence_put(sa_bo->fence);
	kfree(sa_bo);
}
153d38ceaf9SAlex Deucher 
/* Walk forward from the hole and reclaim consecutive sub-allocations whose
 * fences have signaled, stopping at the first one still in flight.  A NULL
 * fence means the bo was never handed back via amdgpu_sa_bo_free() and is
 * still in use, so it also stops the scan.
 */
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	/* hole at the end of the list: nothing after it to free */
	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}
170d38ceaf9SAlex Deucher 
171d38ceaf9SAlex Deucher static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
172d38ceaf9SAlex Deucher {
173d38ceaf9SAlex Deucher 	struct list_head *hole = sa_manager->hole;
174d38ceaf9SAlex Deucher 
175d38ceaf9SAlex Deucher 	if (hole != &sa_manager->olist) {
176d38ceaf9SAlex Deucher 		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
177d38ceaf9SAlex Deucher 	}
178d38ceaf9SAlex Deucher 	return 0;
179d38ceaf9SAlex Deucher }
180d38ceaf9SAlex Deucher 
181d38ceaf9SAlex Deucher static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
182d38ceaf9SAlex Deucher {
183d38ceaf9SAlex Deucher 	struct list_head *hole = sa_manager->hole;
184d38ceaf9SAlex Deucher 
185d38ceaf9SAlex Deucher 	if (hole->next != &sa_manager->olist) {
186d38ceaf9SAlex Deucher 		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
187d38ceaf9SAlex Deucher 	}
188d38ceaf9SAlex Deucher 	return sa_manager->size;
189d38ceaf9SAlex Deucher }
190d38ceaf9SAlex Deucher 
191d38ceaf9SAlex Deucher static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
192d38ceaf9SAlex Deucher 				   struct amdgpu_sa_bo *sa_bo,
193d38ceaf9SAlex Deucher 				   unsigned size, unsigned align)
194d38ceaf9SAlex Deucher {
195d38ceaf9SAlex Deucher 	unsigned soffset, eoffset, wasted;
196d38ceaf9SAlex Deucher 
197d38ceaf9SAlex Deucher 	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
198d38ceaf9SAlex Deucher 	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
199d38ceaf9SAlex Deucher 	wasted = (align - (soffset % align)) % align;
200d38ceaf9SAlex Deucher 
201d38ceaf9SAlex Deucher 	if ((eoffset - soffset) >= (size + wasted)) {
202d38ceaf9SAlex Deucher 		soffset += wasted;
203d38ceaf9SAlex Deucher 
204d38ceaf9SAlex Deucher 		sa_bo->manager = sa_manager;
205d38ceaf9SAlex Deucher 		sa_bo->soffset = soffset;
206d38ceaf9SAlex Deucher 		sa_bo->eoffset = soffset + size;
207d38ceaf9SAlex Deucher 		list_add(&sa_bo->olist, sa_manager->hole);
208d38ceaf9SAlex Deucher 		INIT_LIST_HEAD(&sa_bo->flist);
209d38ceaf9SAlex Deucher 		sa_manager->hole = &sa_bo->olist;
210d38ceaf9SAlex Deucher 		return true;
211d38ceaf9SAlex Deucher 	}
212d38ceaf9SAlex Deucher 	return false;
213d38ceaf9SAlex Deucher }
214d38ceaf9SAlex Deucher 
215d38ceaf9SAlex Deucher /**
216d38ceaf9SAlex Deucher  * amdgpu_sa_event - Check if we can stop waiting
217d38ceaf9SAlex Deucher  *
218d38ceaf9SAlex Deucher  * @sa_manager: pointer to the sa_manager
219d38ceaf9SAlex Deucher  * @size: number of bytes we want to allocate
220d38ceaf9SAlex Deucher  * @align: alignment we need to match
221d38ceaf9SAlex Deucher  *
222d38ceaf9SAlex Deucher  * Check if either there is a fence we can wait for or
223d38ceaf9SAlex Deucher  * enough free memory to satisfy the allocation directly
224d38ceaf9SAlex Deucher  */
225d38ceaf9SAlex Deucher static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
226d38ceaf9SAlex Deucher 			    unsigned size, unsigned align)
227d38ceaf9SAlex Deucher {
228d38ceaf9SAlex Deucher 	unsigned soffset, eoffset, wasted;
229d38ceaf9SAlex Deucher 	int i;
230d38ceaf9SAlex Deucher 
231d38ceaf9SAlex Deucher 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
232d38ceaf9SAlex Deucher 		if (!list_empty(&sa_manager->flist[i])) {
233d38ceaf9SAlex Deucher 			return true;
234d38ceaf9SAlex Deucher 		}
235d38ceaf9SAlex Deucher 	}
236d38ceaf9SAlex Deucher 
237d38ceaf9SAlex Deucher 	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
238d38ceaf9SAlex Deucher 	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
239d38ceaf9SAlex Deucher 	wasted = (align - (soffset % align)) % align;
240d38ceaf9SAlex Deucher 
241d38ceaf9SAlex Deucher 	if ((eoffset - soffset) >= (size + wasted)) {
242d38ceaf9SAlex Deucher 		return true;
243d38ceaf9SAlex Deucher 	}
244d38ceaf9SAlex Deucher 
245d38ceaf9SAlex Deucher 	return false;
246d38ceaf9SAlex Deucher }
247d38ceaf9SAlex Deucher 
/* Advance the hole past freed-but-not-yet-reclaimed allocations.
 *
 * Looks at the oldest freed bo of every ring: signaled ones are candidates
 * for removal (the closest one after the current hole wins, with wrap-around
 * handled by adding sa_manager->size); unsignaled ones are recorded in
 * @fences so the caller can wait on them.  @tries limits how often each
 * ring may supply a candidate so one ring cannot starve the others.
 *
 * Returns true if the hole moved (caller should retry the allocation),
 * false if there is nothing more to skip over.
 * Called with sa_manager->wq.lock held.
 */
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence list and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		/* still in flight: hand the fence to the caller to wait on */
		if (!fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		   so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}
313d38ceaf9SAlex Deucher 
/* Allocate @size bytes (aligned to @align) from the sub-allocator.
 *
 * Fast path: carve the request out of the current hole.  Otherwise skip
 * over already-signaled allocations via amdgpu_sa_bo_next_hole(); if that
 * yields nothing, wait — either on the unsignaled fences collected during
 * the scan, or (when there are none) on the manager's waitqueue until
 * amdgpu_sa_event() says progress is possible.
 *
 * On success *sa_bo points at the new allocation and 0 is returned; on
 * failure *sa_bo is set to NULL and a negative error code is returned.
 */
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
	unsigned count;
	int i, r;
	signed long t;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		/* compact the fence array and take references so the fences
		 * stay valid after the lock is dropped */
		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
			if (fences[i])
				fences[count++] = fence_get(fences[i]);

		if (count) {
			/* wait outside the lock for any fence to signal */
			spin_unlock(&sa_manager->wq.lock);
			t = fence_wait_any_timeout(fences, count, false,
						   MAX_SCHEDULE_TIMEOUT);
			for (i = 0; i < count; ++i)
				fence_put(fences[i]);

			/* t > 0 means a fence signaled; otherwise propagate
			 * the error/timeout */
			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}
383d38ceaf9SAlex Deucher 
/* Release a sub-allocation.
 *
 * If @fence is given and not yet signaled, the bo keeps a fence reference
 * and is queued on its ring's free list to be reclaimed later; otherwise
 * it is removed immediately.  Waiters on the manager are woken in either
 * case.  *sa_bo is set to NULL on return.
 */
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;
		/* defer the actual removal until the fence signals */
		(*sa_bo)->fence = fence_get(fence);
		idx = amdgpu_ring_from_fence(fence)->idx;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}
407d38ceaf9SAlex Deucher 
408d38ceaf9SAlex Deucher #if defined(CONFIG_DEBUG_FS)
4094f839a24SChristian König 
4104f839a24SChristian König static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
4114f839a24SChristian König {
4124f839a24SChristian König 	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
4134f839a24SChristian König 	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
4144f839a24SChristian König 
4154f839a24SChristian König 	if (a_fence)
4164f839a24SChristian König 		seq_printf(m, " protected by 0x%016llx on ring %d",
4174f839a24SChristian König 			   a_fence->seq, a_fence->ring->idx);
4184f839a24SChristian König 
4194f839a24SChristian König 	if (s_fence) {
4204f839a24SChristian König 		struct amdgpu_ring *ring;
4214f839a24SChristian König 
4224f839a24SChristian König 
4234f839a24SChristian König 		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
4244f839a24SChristian König 		seq_printf(m, " protected by 0x%016x on ring %d",
4254f839a24SChristian König 			   s_fence->base.seqno, ring->idx);
4264f839a24SChristian König 	}
4274f839a24SChristian König }
4284f839a24SChristian König 
/* Dump every sub-allocation (GPU address range, size, protecting fence)
 * to the debugfs seq_file; the current hole is marked with '>'.
 */
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence)
			amdgpu_sa_bo_dump_fence(i->fence, m);
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
451d38ceaf9SAlex Deucher #endif
452