/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of &ttm_resource_fini.
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->num_pages = PFN_UP(bo->base.size);
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	man->usage += bo->base.size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
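
/*
 * A minimal usage sketch: resource manager backends typically embed
 * struct ttm_resource in a backend-specific type and call
 * ttm_resource_init() from their alloc callback. The names
 * "my_resource" and "my_manager_alloc" here are hypothetical.
 *
 *	struct my_resource {
 *		struct ttm_resource base;
 *	};
 *
 *	static int my_manager_alloc(struct ttm_resource_manager *man,
 *				    struct ttm_buffer_object *bo,
 *				    const struct ttm_place *place,
 *				    struct ttm_resource **res)
 *	{
 *		struct my_resource *my_res;
 *
 *		my_res = kzalloc(sizeof(*my_res), GFP_KERNEL);
 *		if (!my_res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, &my_res->base);
 *		*res = &my_res->base;
 *		return 0;
 *	}
 */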

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Counterpart of
 * &ttm_resource_init.
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	spin_lock(&man->bdev->lru_lock);
	man->usage -= res->bo->base.size;
	spin_unlock(&man->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
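
/*
 * A matching sketch for the free side, continuing the hypothetical
 * backend above: call ttm_resource_fini() before freeing the
 * embedding structure.
 *
 *	static void my_manager_free(struct ttm_resource_manager *man,
 *				    struct ttm_resource *res)
 *	{
 *		struct my_resource *my_res =
 *			container_of(res, struct my_resource, base);
 *
 *		ttm_resource_fini(man, res);
 *		kfree(my_res);
 *	}
 */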

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);

	return man->func->alloc(man, bo, place, res_ptr);
}

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	if (!*res)
		return;

	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
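
/*
 * A usage sketch: ttm_resource_alloc() and ttm_resource_free() are
 * symmetric, and since free clears the pointer a second call on the
 * same variable is a harmless no-op.
 *
 *	struct ttm_resource *res;
 *	int ret;
 *
 *	ret = ttm_resource_alloc(bo, place, &res);
 *	if (ret)
 *		return ret;
 *	(... use res ...)
 *	ttm_resource_free(bo, &res);	(res is NULL afterwards)
 */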

static bool ttm_resource_places_compat(struct ttm_resource *res,
				       const struct ttm_place *places,
				       unsigned num_placement)
{
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (res->start < heap->fpfn || (heap->lpfn &&
		    (res->start + res->num_pages) > heap->lpfn))
			continue;

		if ((res->mem_type == heap->mem_type) &&
		    (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

/**
 * ttm_resource_compat - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compat(struct ttm_resource *res,
			 struct ttm_placement *placement)
{
	if (ttm_resource_places_compat(res, placement->placement,
				       placement->num_placement))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_resource_places_compat(res, placement->busy_placement,
				       placement->num_busy_placement))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_resource_compat);
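
/*
 * A usage sketch: drivers can call ttm_resource_compat() to skip a
 * buffer move when the current backing store already satisfies the
 * requested placement, roughly:
 *
 *	if (bo->resource && ttm_resource_compat(bo->resource, &placement))
 *		return 0;	(already compatible, nothing to move)
 */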

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
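
/*
 * A setup sketch, assuming a driver-defined "my_manager_func" ops
 * table and a manager for VRAM:
 *
 *	ttm_resource_manager_init(man, bdev, vram_size);
 *	man->func = &my_manager_func;
 *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
 *	ttm_resource_manager_set_used(man, true);
 */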

/**
 * ttm_resource_manager_evict_all
 *
 * @bdev: device to use
 * @man: manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&bdev->lru_lock);
			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
						  NULL);
			if (ret)
				return ret;
			spin_lock(&bdev->lru_lock);
		}
	}
	spin_unlock(&bdev->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
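
/*
 * A teardown sketch: eviction is normally the middle step of the
 * driver-side manager cleanup sequence, roughly:
 *
 *	ttm_resource_manager_set_used(man, false);
 *	ret = ttm_resource_manager_evict_all(bdev, man);
 *	if (ret)
 *		return ret;
 *	ttm_resource_manager_cleanup(man);
 *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, NULL);
 */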

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how much of the managed resources is currently in use, in the
 * same units as the manager size.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);
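
/*
 * A reporting sketch: usage pairs naturally with the manager size,
 * e.g. when printing statistics:
 *
 *	drm_printf(p, "%llu of %llu used\n",
 *		   ttm_resource_manager_usage(man), man->size);
 */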

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
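
/*
 * A usage sketch: dumping manager state from a hypothetical debugfs
 * show function through a struct drm_printer:
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct ttm_resource_manager *man = m->private;
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		ttm_resource_manager_debug(man, &p);
 *		return 0;
 *	}
 */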

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
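
/*
 * A consumption sketch: a kmap iterator is used one page at a time
 * through its ops, which is how ttm_move_memcpy() drives it:
 *
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, page_index);
 *	(... copy to or from the mapping ...)
 *	if (iter->ops->unmap_local)
 *		iter->ops->unmap_local(iter, &map);
 */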

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too distant future. Best would be if we
 * could make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather they use
 * ioremap or friends, and at least on 32-bit they add global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     bus_size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, bus_size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached was requested, or mapping cached or WC failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  bus_size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}
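
/*
 * A usage sketch: init and fini bracket the lifetime of the iterator,
 * typically around a memcpy-based move:
 *
 *	struct ttm_kmap_iter_linear_io io;
 *	struct ttm_kmap_iter *iter;
 *
 *	iter = ttm_kmap_iter_linear_io_init(&io, bdev, mem);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *	(... use iter as a memcpy source or destination ...)
 *	ttm_kmap_iter_linear_io_fini(&io, bdev, mem);
 */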

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}
464