188be9a0aSMatthew Auld // SPDX-License-Identifier: MIT
288be9a0aSMatthew Auld /*
388be9a0aSMatthew Auld  * Copyright © 2021 Intel Corporation
488be9a0aSMatthew Auld  */
588be9a0aSMatthew Auld 
688be9a0aSMatthew Auld #include <linux/slab.h>
788be9a0aSMatthew Auld 
888be9a0aSMatthew Auld #include <drm/ttm/ttm_placement.h>
9*a3185f91SChristian König #include <drm/ttm/ttm_bo.h>
1088be9a0aSMatthew Auld 
116387a3c4SArunpravin #include <drm/drm_buddy.h>
126387a3c4SArunpravin 
1388be9a0aSMatthew Auld #include "i915_ttm_buddy_manager.h"
1488be9a0aSMatthew Auld 
1588be9a0aSMatthew Auld #include "i915_gem.h"
1688be9a0aSMatthew Auld 
/*
 * TTM resource manager backed by a drm_buddy allocator.
 *
 * @manager: base TTM resource manager; embedded so to_buddy_manager()
 *	can recover this struct with container_of().
 * @mm: the drm buddy allocator managing the region's pages.
 * @reserved: list of blocks carved out via i915_ttm_buddy_man_reserve();
 *	freed back in i915_ttm_buddy_man_fini().
 * @lock: protects @mm, @reserved and the visible_* counters below.
 * @visible_size: size in pages of the CPU visible (mappable) portion.
 * @visible_avail: pages of the visible portion still unallocated.
 * @visible_reserved: pages of the visible portion consumed by @reserved.
 * @default_page_size: default minimum allocation granularity in bytes;
 *	overridable per-BO via bo->page_alignment.
 */
struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager;
	struct drm_buddy mm;
	struct list_head reserved;
	struct mutex lock;
	unsigned long visible_size;
	unsigned long visible_avail;
	unsigned long visible_reserved;
	u64 default_page_size;
};
2788be9a0aSMatthew Auld 
/* Upcast from the embedded base manager to our buddy manager. */
static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}
3388be9a0aSMatthew Auld 
i915_ttm_buddy_man_alloc(struct ttm_resource_manager * man,struct ttm_buffer_object * bo,const struct ttm_place * place,struct ttm_resource ** res)3488be9a0aSMatthew Auld static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
3588be9a0aSMatthew Auld 				    struct ttm_buffer_object *bo,
3688be9a0aSMatthew Auld 				    const struct ttm_place *place,
3788be9a0aSMatthew Auld 				    struct ttm_resource **res)
3888be9a0aSMatthew Auld {
3988be9a0aSMatthew Auld 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
4088be9a0aSMatthew Auld 	struct i915_ttm_buddy_resource *bman_res;
416387a3c4SArunpravin 	struct drm_buddy *mm = &bman->mm;
42afea229fSArunpravin 	unsigned long n_pages, lpfn;
4388be9a0aSMatthew Auld 	u64 min_page_size;
4488be9a0aSMatthew Auld 	u64 size;
4588be9a0aSMatthew Auld 	int err;
4688be9a0aSMatthew Auld 
47afea229fSArunpravin 	lpfn = place->lpfn;
48afea229fSArunpravin 	if (!lpfn)
49afea229fSArunpravin 		lpfn = man->size;
5088be9a0aSMatthew Auld 
5188be9a0aSMatthew Auld 	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
5288be9a0aSMatthew Auld 	if (!bman_res)
5388be9a0aSMatthew Auld 		return -ENOMEM;
5488be9a0aSMatthew Auld 
5588be9a0aSMatthew Auld 	ttm_resource_init(bo, place, &bman_res->base);
5688be9a0aSMatthew Auld 	INIT_LIST_HEAD(&bman_res->blocks);
5788be9a0aSMatthew Auld 	bman_res->mm = mm;
5888be9a0aSMatthew Auld 
59476e4063SArunpravin 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
60476e4063SArunpravin 		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
61476e4063SArunpravin 
62afea229fSArunpravin 	if (place->fpfn || lpfn != man->size)
63afea229fSArunpravin 		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
64afea229fSArunpravin 
65e3c92eb4SSomalapuram Amaranath 	GEM_BUG_ON(!bman_res->base.size);
66e3c92eb4SSomalapuram Amaranath 	size = bman_res->base.size;
6788be9a0aSMatthew Auld 
68d22632c8SMatthew Auld 	min_page_size = bman->default_page_size;
69d22632c8SMatthew Auld 	if (bo->page_alignment)
7088be9a0aSMatthew Auld 		min_page_size = bo->page_alignment << PAGE_SHIFT;
71d22632c8SMatthew Auld 
7288be9a0aSMatthew Auld 	GEM_BUG_ON(min_page_size < mm->chunk_size);
73be77bb3eSMatthew Auld 	GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
74afea229fSArunpravin 
75e3c92eb4SSomalapuram Amaranath 	if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
76ecbf2060SMatthew Auld 	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
77afea229fSArunpravin 		unsigned long pages;
78afea229fSArunpravin 
7988be9a0aSMatthew Auld 		size = roundup_pow_of_two(size);
80afea229fSArunpravin 		min_page_size = size;
81afea229fSArunpravin 
82afea229fSArunpravin 		pages = size >> ilog2(mm->chunk_size);
83afea229fSArunpravin 		if (pages > lpfn)
84afea229fSArunpravin 			lpfn = pages;
8588be9a0aSMatthew Auld 	}
8688be9a0aSMatthew Auld 
87f199bf55SMatthew Auld 	if (size > lpfn << PAGE_SHIFT) {
8888be9a0aSMatthew Auld 		err = -E2BIG;
8988be9a0aSMatthew Auld 		goto err_free_res;
9088be9a0aSMatthew Auld 	}
9188be9a0aSMatthew Auld 
9288be9a0aSMatthew Auld 	n_pages = size >> ilog2(mm->chunk_size);
9388be9a0aSMatthew Auld 
9488be9a0aSMatthew Auld 	mutex_lock(&bman->lock);
9526ffcbbeSMatthew Auld 	if (lpfn <= bman->visible_size && n_pages > bman->visible_avail) {
9626ffcbbeSMatthew Auld 		mutex_unlock(&bman->lock);
9726ffcbbeSMatthew Auld 		err = -ENOSPC;
9826ffcbbeSMatthew Auld 		goto err_free_res;
9926ffcbbeSMatthew Auld 	}
10026ffcbbeSMatthew Auld 
101afea229fSArunpravin 	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
102afea229fSArunpravin 				     (u64)lpfn << PAGE_SHIFT,
103afea229fSArunpravin 				     (u64)n_pages << PAGE_SHIFT,
104afea229fSArunpravin 				     min_page_size,
105afea229fSArunpravin 				     &bman_res->blocks,
106afea229fSArunpravin 				     bman_res->flags);
107afea229fSArunpravin 	if (unlikely(err))
10888be9a0aSMatthew Auld 		goto err_free_blocks;
10988be9a0aSMatthew Auld 
11095ee2a8bSArunpravin 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
111e3c92eb4SSomalapuram Amaranath 		u64 original_size = (u64)bman_res->base.size;
11295ee2a8bSArunpravin 
11395ee2a8bSArunpravin 		drm_buddy_block_trim(mm,
11495ee2a8bSArunpravin 				     original_size,
11595ee2a8bSArunpravin 				     &bman_res->blocks);
11695ee2a8bSArunpravin 	}
11795ee2a8bSArunpravin 
11826ffcbbeSMatthew Auld 	if (lpfn <= bman->visible_size) {
119e3c92eb4SSomalapuram Amaranath 		bman_res->used_visible_size = PFN_UP(bman_res->base.size);
12026ffcbbeSMatthew Auld 	} else {
12126ffcbbeSMatthew Auld 		struct drm_buddy_block *block;
12226ffcbbeSMatthew Auld 
12326ffcbbeSMatthew Auld 		list_for_each_entry(block, &bman_res->blocks, link) {
12426ffcbbeSMatthew Auld 			unsigned long start =
12526ffcbbeSMatthew Auld 				drm_buddy_block_offset(block) >> PAGE_SHIFT;
12626ffcbbeSMatthew Auld 
12726ffcbbeSMatthew Auld 			if (start < bman->visible_size) {
12826ffcbbeSMatthew Auld 				unsigned long end = start +
12926ffcbbeSMatthew Auld 					(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
13026ffcbbeSMatthew Auld 
13126ffcbbeSMatthew Auld 				bman_res->used_visible_size +=
13226ffcbbeSMatthew Auld 					min(end, bman->visible_size) - start;
13326ffcbbeSMatthew Auld 			}
13426ffcbbeSMatthew Auld 		}
13526ffcbbeSMatthew Auld 	}
13626ffcbbeSMatthew Auld 
137141f733bSMatthew Auld 	if (bman_res->used_visible_size)
13826ffcbbeSMatthew Auld 		bman->visible_avail -= bman_res->used_visible_size;
139141f733bSMatthew Auld 
14026ffcbbeSMatthew Auld 	mutex_unlock(&bman->lock);
14126ffcbbeSMatthew Auld 
14288be9a0aSMatthew Auld 	*res = &bman_res->base;
14388be9a0aSMatthew Auld 	return 0;
14488be9a0aSMatthew Auld 
14588be9a0aSMatthew Auld err_free_blocks:
1466387a3c4SArunpravin 	drm_buddy_free_list(mm, &bman_res->blocks);
14788be9a0aSMatthew Auld 	mutex_unlock(&bman->lock);
14888be9a0aSMatthew Auld err_free_res:
149de3688e4SChristian König 	ttm_resource_fini(man, &bman_res->base);
15088be9a0aSMatthew Auld 	kfree(bman_res);
15188be9a0aSMatthew Auld 	return err;
15288be9a0aSMatthew Auld }
15388be9a0aSMatthew Auld 
/*
 * Release a resource previously handed out by i915_ttm_buddy_man_alloc():
 * return its buddy blocks to the allocator, credit back any CPU visible
 * pages it consumed, then tear down and free the resource.
 */
static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(&bman->mm, &bman_res->blocks);
	bman->visible_avail += bman_res->used_visible_size;
	mutex_unlock(&bman->lock);

	ttm_resource_fini(man, res);
	kfree(bman_res);
}
16888be9a0aSMatthew Auld 
/*
 * Report whether any part of the victim resource @res overlaps the
 * placement range in @place. Used by TTM eviction to decide whether
 * evicting @res could help satisfy a new allocation.
 */
static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man,
					  struct ttm_resource *res,
					  const struct ttm_place *place,
					  size_t size)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	struct drm_buddy_block *block;

	/* No range restriction: everything intersects */
	if (!place->fpfn && !place->lpfn)
		return true;

	GEM_BUG_ON(!place->lpfn);

	/*
	 * If we just want something mappable then we can quickly check
	 * if the current victim resource is using any of the CPU
	 * visible portion.
	 */
	if (!place->fpfn &&
	    place->lpfn == i915_ttm_buddy_man_visible_size(man))
		return bman_res->used_visible_size > 0;

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &bman_res->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		/* Half-open interval overlap test in pages */
		if (place->fpfn < lpfn && place->lpfn > fpfn)
			return true;
	}

	return false;
}
20692b2b55eSArunpravin Paneer Selvam 
/*
 * Report whether @res already fully satisfies the placement @place,
 * i.e. every buddy block lies inside [place->fpfn, place->lpfn).
 * Used by TTM to skip needless moves.
 */
static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
					  struct ttm_resource *res,
					  const struct ttm_place *place,
					  size_t size)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	struct drm_buddy_block *block;

	/* No range restriction: any placement is compatible */
	if (!place->fpfn && !place->lpfn)
		return true;

	GEM_BUG_ON(!place->lpfn);

	/*
	 * Mappable-only request: compatible iff the whole resource
	 * already resides in the CPU visible portion.
	 */
	if (!place->fpfn &&
	    place->lpfn == i915_ttm_buddy_man_visible_size(man))
		return bman_res->used_visible_size == PFN_UP(res->size);

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &bman_res->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		if (fpfn < place->fpfn || lpfn > place->lpfn)
			return false;
	}

	return true;
}
23992b2b55eSArunpravin Paneer Selvam 
/*
 * Dump the manager state (page-size defaults, visible accounting, the
 * buddy allocator state and any reserved blocks) to @printer, e.g. for
 * debugfs.
 */
static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
				     struct drm_printer *printer)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy_block *block;

	mutex_lock(&bman->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   bman->default_page_size >> 10);
	/* visible_* counters are in pages; convert to MiB for display */
	drm_printf(printer, "visible_avail: %lluMiB\n",
		   (u64)bman->visible_avail << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_size: %lluMiB\n",
		   (u64)bman->visible_size << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_reserved: %lluMiB\n",
		   (u64)bman->visible_reserved << PAGE_SHIFT >> 20);

	drm_buddy_print(&bman->mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &bman->reserved, link)
		drm_buddy_block_print(&bman->mm, block, printer);
	mutex_unlock(&bman->lock);
}
2635359b745SMatthew Auld 
/* TTM callbacks wiring this buddy-based manager into the core. */
static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
	.intersects = i915_ttm_buddy_man_intersects,
	.compatible = i915_ttm_buddy_man_compatible,
	.debug = i915_ttm_buddy_man_debug,
};
27188be9a0aSMatthew Auld 
27288be9a0aSMatthew Auld /**
27388be9a0aSMatthew Auld  * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
27488be9a0aSMatthew Auld  * @bdev: The ttm device
27588be9a0aSMatthew Auld  * @type: Memory type we want to manage
27688be9a0aSMatthew Auld  * @use_tt: Set use_tt for the manager
27788be9a0aSMatthew Auld  * @size: The size in bytes to manage
27826ffcbbeSMatthew Auld  * @visible_size: The CPU visible size in bytes to manage
279d22632c8SMatthew Auld  * @default_page_size: The default minimum page size in bytes for allocations,
280d22632c8SMatthew Auld  * this must be at least as large as @chunk_size, and can be overridden by
281d22632c8SMatthew Auld  * setting the BO page_alignment, to be larger or smaller as needed.
28288be9a0aSMatthew Auld  * @chunk_size: The minimum page size in bytes for our allocations i.e
28388be9a0aSMatthew Auld  * order-zero
28488be9a0aSMatthew Auld  *
28588be9a0aSMatthew Auld  * Note that the starting address is assumed to be zero here, since this
28688be9a0aSMatthew Auld  * simplifies keeping the property where allocated blocks having natural
28788be9a0aSMatthew Auld  * power-of-two alignment. So long as the real starting address is some large
28888be9a0aSMatthew Auld  * power-of-two, or naturally start from zero, then this should be fine.  Also
28988be9a0aSMatthew Auld  * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
29088be9a0aSMatthew Auld  * if say there is some unusable range from the start of the region. We can
29188be9a0aSMatthew Auld  * revisit this in the future and make the interface accept an actual starting
29288be9a0aSMatthew Auld  * offset and let it take care of the rest.
29388be9a0aSMatthew Auld  *
29488be9a0aSMatthew Auld  * Note that if the @size is not aligned to the @chunk_size then we perform the
29588be9a0aSMatthew Auld  * required rounding to get the usable size. The final size in pages can be
29688be9a0aSMatthew Auld  * taken from &ttm_resource_manager.size.
29788be9a0aSMatthew Auld  *
29888be9a0aSMatthew Auld  * Return: 0 on success, negative error code on failure.
29988be9a0aSMatthew Auld  */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 visible_size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = drm_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;
	/* visible_* bookkeeping is kept in pages, not bytes */
	bman->visible_size = visible_size >> PAGE_SHIFT;
	bman->visible_avail = bman->visible_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	/* mm.size may be rounded down from @size to a chunk_size multiple */
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}
33888be9a0aSMatthew Auld 
33988be9a0aSMatthew Auld /**
34088be9a0aSMatthew Auld  * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
34188be9a0aSMatthew Auld  * @bdev: The ttm device
34288be9a0aSMatthew Auld  * @type: Memory type we want to manage
34388be9a0aSMatthew Auld  *
34488be9a0aSMatthew Auld  * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
34588be9a0aSMatthew Auld  * also be freed for us here.
34688be9a0aSMatthew Auld  *
34788be9a0aSMatthew Auld  * Return: 0 on success, negative error code on failure.
34888be9a0aSMatthew Auld  */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	int ret;

	/* Stop new allocations, then kick out everything still resident */
	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman->reserved);
	drm_buddy_fini(mm);
	/* Credit reservations back; anything else missing is a leak */
	bman->visible_avail += bman->visible_reserved;
	WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}
37688be9a0aSMatthew Auld 
37788be9a0aSMatthew Auld /**
37888be9a0aSMatthew Auld  * i915_ttm_buddy_man_reserve - Reserve address range
37988be9a0aSMatthew Auld  * @man: The buddy allocator ttm manager
38088be9a0aSMatthew Auld  * @start: The offset in bytes, where the region start is assumed to be zero
38188be9a0aSMatthew Auld  * @size: The size in bytes
38288be9a0aSMatthew Auld  *
38388be9a0aSMatthew Auld  * Note that the starting address for the region is always assumed to be zero.
38488be9a0aSMatthew Auld  *
38588be9a0aSMatthew Auld  * Return: 0 on success, negative error code on failure.
38688be9a0aSMatthew Auld  */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	unsigned long fpfn = start >> PAGE_SHIFT;
	unsigned long flags = 0;
	int ret;

	/* Reservations always target an exact [start, start + size) range */
	flags |= DRM_BUDDY_RANGE_ALLOCATION;

	mutex_lock(&bman->lock);
	ret = drm_buddy_alloc_blocks(mm, start,
				     start + size,
				     size, mm->chunk_size,
				     &bman->reserved,
				     flags);

	/*
	 * Only account the CPU visible portion when the blocks were
	 * actually reserved; adjusting the counters on failure would
	 * skew visible_avail/visible_reserved and trip the
	 * WARN_ON_ONCE in i915_ttm_buddy_man_fini().
	 */
	if (!ret && fpfn < bman->visible_size) {
		unsigned long lpfn = fpfn + (size >> PAGE_SHIFT);
		unsigned long visible = min(lpfn, bman->visible_size) - fpfn;

		bman->visible_reserved += visible;
		bman->visible_avail -= visible;
	}
	mutex_unlock(&bman->lock);

	return ret;
}
41688be9a0aSMatthew Auld 
41726ffcbbeSMatthew Auld /**
41826ffcbbeSMatthew Auld  * i915_ttm_buddy_man_visible_size - Return the size of the CPU visible portion
41926ffcbbeSMatthew Auld  * in pages.
42026ffcbbeSMatthew Auld  * @man: The buddy allocator ttm manager
42126ffcbbeSMatthew Auld  */
u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	/* Returned in pages; set at init (or forced by selftests) */
	return bman->visible_size;
}
428fb87550dSMatthew Auld 
429141f733bSMatthew Auld /**
430141f733bSMatthew Auld  * i915_ttm_buddy_man_avail - Query the avail tracking for the manager.
431141f733bSMatthew Auld  *
432141f733bSMatthew Auld  * @man: The buddy allocator ttm manager
433141f733bSMatthew Auld  * @avail: The total available memory in pages for the entire manager.
434141f733bSMatthew Auld  * @visible_avail: The total available memory in pages for the CPU visible
435141f733bSMatthew Auld  * portion. Note that this will always give the same value as @avail on
436141f733bSMatthew Auld  * configurations that don't have a small BAR.
437141f733bSMatthew Auld  */
void i915_ttm_buddy_man_avail(struct ttm_resource_manager *man,
			      u64 *avail, u64 *visible_avail)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	/* Take the lock so the two counters form a consistent snapshot */
	mutex_lock(&bman->lock);
	*avail = bman->mm.avail >> PAGE_SHIFT;
	*visible_avail = bman->visible_avail;
	mutex_unlock(&bman->lock);
}
448141f733bSMatthew Auld 
449fb87550dSMatthew Auld #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
/*
 * Selftest hook: override the CPU visible size (in pages) to emulate
 * small-BAR configurations.
 *
 * NOTE(review): writes visible_size without taking bman->lock —
 * presumably safe because selftests call it with no concurrent
 * allocations in flight; confirm against the selftest callers.
 */
void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man,
					   u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	bman->visible_size = size;
}
457fb87550dSMatthew Auld #endif
458