/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole" and always try to allocate
 * right after it. The principle is that with linear GPU ring
 * progression, whatever comes after the last allocation is the oldest
 * bo we allocated and thus the first one that should no longer be in
 * use by the GPU.
 *
 * If that is not the case we skip over the bo after last to the
 * closest finished bo, if such one exists. If none exists and we are
 * not asked to block, we report an allocation failure.
 *
 * If we are asked to block, we wait on the oldest fence of each ring
 * and return as soon as any of those fences completes.
 */
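/* Illustrative sketch of typical usage (not code from this file): a caller
 * grabs a small piece of the managed buffer, lets the GPU consume it and
 * hands back a fence so that the space can be recycled once that fence
 * signals. The manager instance (adev->ring_tmp_bo), the address helpers
 * and submit_to_ring() are assumptions made purely for illustration; only
 * amdgpu_sa_bo_new() and amdgpu_sa_bo_free() below are defined here.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	struct fence *fence;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &sa_bo, 256, 256);
 *	if (r)
 *		return r;
 *	memcpy(amdgpu_sa_bo_cpu_addr(sa_bo), data, 256);
 *	fence = submit_to_ring(ring, amdgpu_sa_bo_gpu_addr(sa_bo));
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */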
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

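/**
 * amdgpu_sa_bo_manager_init - initialize a sub-allocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: sub-allocator manager to initialize
 * @size: size of the backing buffer object in bytes
 * @align: maximum alignment sub-allocations may request, also used for the
 *	backing buffer object
 * @domain: memory domain to place the backing buffer object in
 *
 * Sets up the hole and fence lists and creates the backing buffer object.
 * Returns 0 on success, negative error code on failure.
 */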
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     0, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

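/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: sub-allocator manager to tear down
 *
 * Frees all remaining sub-allocations (complaining if any are still busy)
 * and drops the reference to the backing buffer object.
 */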
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

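/**
 * amdgpu_sa_bo_manager_start - pin and map the backing buffer object
 *
 * @adev: amdgpu device pointer
 * @sa_manager: sub-allocator manager to start
 *
 * Pins the backing buffer object in its domain and maps it for CPU access
 * so that sub-allocations can be handed out.
 * Returns 0 on success, negative error code on failure.
 */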
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

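/**
 * amdgpu_sa_bo_manager_suspend - unmap and unpin the backing buffer object
 *
 * @adev: amdgpu device pointer
 * @sa_manager: sub-allocator manager to suspend
 *
 * Unmaps and unpins the backing buffer object, e.g. before a suspend cycle.
 * Returns 0 on success, negative error code on failure.
 */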
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

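/* Unlink @sa_bo from the allocation and fence lists, move the hole back if
 * it pointed at this entry, then drop the fence reference and free it.
 */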
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	fence_put(sa_bo->fence);
	kfree(sa_bo);
}

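/* Walk forward from the hole and free every sub-allocation whose fence has
 * already signaled, stopping at the first one that has no fence yet or is
 * still in flight.
 */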
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

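/* Start offset of the hole: the end of the last allocation, or 0 when the
 * hole sits at the beginning of the buffer.
 */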
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

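/* End offset of the hole: the start of the next allocation, or the manager
 * size when the hole extends to the end of the buffer.
 */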
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

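/* Try to fit a sub-allocation of @size bytes with alignment @align into the
 * current hole. On success the new allocation becomes the hole position and
 * true is returned; false means the hole is too small.
 */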
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (!list_empty(&sa_manager->flist[i]))
			return true;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

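/* Move the hole to the next place an allocation could succeed: either wrap
 * around to the beginning of the buffer or reclaim the closest signaled
 * sub-allocation after the current hole. Unsignaled fences are reported
 * back through @fences so the caller can wait on them, and @tries limits
 * how often each fence context gets reclaimed before we give up on it.
 * Returns true if the hole moved and the allocation should be retried.
 */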
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo closest
	 * after the current hole
	 */
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i]))
			continue;

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = best_bo->fence->context;

		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know this one is signaled, so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

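/**
 * amdgpu_sa_bo_new - allocate a new sub-allocation
 *
 * @sa_manager: sub-allocator manager to allocate from
 * @sa_bo: pointer used to return the new sub-allocation
 * @size: number of bytes to allocate, must not exceed the manager size
 * @align: requested alignment, must not exceed the manager alignment
 *
 * Tries to allocate from the current hole, reclaims signaled allocations
 * when necessary and otherwise blocks interruptibly until space frees up.
 * Returns 0 on success, negative error code on failure.
 */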
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;

	if (WARN_ON_ONCE(align > sa_manager->align))
		return -EINVAL;

	if (WARN_ON_ONCE(size > sa_manager->size))
		return -EINVAL;

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			if (fences[i])
				fences[count++] = fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq.lock);
			t = fence_wait_any_timeout(fences, count, false,
						   MAX_SCHEDULE_TIMEOUT);
			for (i = 0; i < count; ++i)
				fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for, block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

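/**
 * amdgpu_sa_bo_free - free a sub-allocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: sub-allocation to free, set to NULL on return
 * @fence: fence that has to signal before the memory can be reused, or NULL
 *
 * If @fence has not signaled yet the sub-allocation is queued on the fence
 * list and reclaimed later, otherwise it is released immediately. Waiters
 * in amdgpu_sa_bo_new() are woken up either way.
 */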
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = fence_get(fence);
		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

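/* Dump all sub-allocations to debugfs, marking the current hole with '>'. */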
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);

		if (i->fence)
			seq_printf(m, " protected by 0x%08x on context %d",
				   i->fence->seqno, i->fence->context);

		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif