1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021 Intel Corporation 4 */ 5 6 #include <linux/slab.h> 7 8 #include <drm/ttm/ttm_bo_driver.h> 9 #include <drm/ttm/ttm_placement.h> 10 11 #include <drm/drm_buddy.h> 12 13 #include "i915_ttm_buddy_manager.h" 14 15 #include "i915_gem.h" 16 17 struct i915_ttm_buddy_manager { 18 struct ttm_resource_manager manager; 19 struct drm_buddy mm; 20 struct list_head reserved; 21 struct mutex lock; 22 u64 default_page_size; 23 }; 24 25 static struct i915_ttm_buddy_manager * 26 to_buddy_manager(struct ttm_resource_manager *man) 27 { 28 return container_of(man, struct i915_ttm_buddy_manager, manager); 29 } 30 31 static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, 32 struct ttm_buffer_object *bo, 33 const struct ttm_place *place, 34 struct ttm_resource **res) 35 { 36 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 37 struct i915_ttm_buddy_resource *bman_res; 38 struct drm_buddy *mm = &bman->mm; 39 unsigned long n_pages; 40 unsigned int min_order; 41 u64 min_page_size; 42 u64 size; 43 int err; 44 45 GEM_BUG_ON(place->fpfn || place->lpfn); 46 47 bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL); 48 if (!bman_res) 49 return -ENOMEM; 50 51 ttm_resource_init(bo, place, &bman_res->base); 52 INIT_LIST_HEAD(&bman_res->blocks); 53 bman_res->mm = mm; 54 55 GEM_BUG_ON(!bman_res->base.num_pages); 56 size = bman_res->base.num_pages << PAGE_SHIFT; 57 58 min_page_size = bman->default_page_size; 59 if (bo->page_alignment) 60 min_page_size = bo->page_alignment << PAGE_SHIFT; 61 62 GEM_BUG_ON(min_page_size < mm->chunk_size); 63 min_order = ilog2(min_page_size) - ilog2(mm->chunk_size); 64 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { 65 size = roundup_pow_of_two(size); 66 min_order = ilog2(size) - ilog2(mm->chunk_size); 67 } 68 69 if (size > mm->size) { 70 err = -E2BIG; 71 goto err_free_res; 72 } 73 74 n_pages = size >> ilog2(mm->chunk_size); 75 76 do { 77 struct drm_buddy_block *block; 78 unsigned int order; 79 80 
order = fls(n_pages) - 1; 81 GEM_BUG_ON(order > mm->max_order); 82 GEM_BUG_ON(order < min_order); 83 84 do { 85 mutex_lock(&bman->lock); 86 block = drm_buddy_alloc_blocks(mm, order); 87 mutex_unlock(&bman->lock); 88 if (!IS_ERR(block)) 89 break; 90 91 if (order-- == min_order) { 92 err = -ENOSPC; 93 goto err_free_blocks; 94 } 95 } while (1); 96 97 n_pages -= BIT(order); 98 99 list_add_tail(&block->link, &bman_res->blocks); 100 101 if (!n_pages) 102 break; 103 } while (1); 104 105 *res = &bman_res->base; 106 return 0; 107 108 err_free_blocks: 109 mutex_lock(&bman->lock); 110 drm_buddy_free_list(mm, &bman_res->blocks); 111 mutex_unlock(&bman->lock); 112 err_free_res: 113 ttm_resource_fini(man, &bman_res->base); 114 kfree(bman_res); 115 return err; 116 } 117 118 static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man, 119 struct ttm_resource *res) 120 { 121 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); 122 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 123 124 mutex_lock(&bman->lock); 125 drm_buddy_free_list(&bman->mm, &bman_res->blocks); 126 mutex_unlock(&bman->lock); 127 128 ttm_resource_fini(man, res); 129 kfree(bman_res); 130 } 131 132 static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man, 133 struct drm_printer *printer) 134 { 135 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 136 struct drm_buddy_block *block; 137 138 mutex_lock(&bman->lock); 139 drm_printf(printer, "default_page_size: %lluKiB\n", 140 bman->default_page_size >> 10); 141 142 drm_buddy_print(&bman->mm, printer); 143 144 drm_printf(printer, "reserved:\n"); 145 list_for_each_entry(block, &bman->reserved, link) 146 drm_buddy_block_print(&bman->mm, block, printer); 147 mutex_unlock(&bman->lock); 148 } 149 150 static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = { 151 .alloc = i915_ttm_buddy_man_alloc, 152 .free = i915_ttm_buddy_man_free, 153 .debug = i915_ttm_buddy_man_debug, 154 }; 155 156 /** 157 
 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @default_page_size: The default minimum page size in bytes for allocations,
 * this must be at least as large as @chunk_size, and can be overridden by
 * setting the BO page_alignment, to be larger or smaller as needed.
 * @chunk_size: The minimum page size in bytes for our allocations i.e
 * order-zero
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property where allocated blocks have natural
 * power-of-two alignment. So long as the real starting address is some large
 * power-of-two, or naturally starts from zero, then this should be fine. Also
 * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
 * if say there is some unusable range from the start of the region. We can
 * revisit this in the future and make the interface accept an actual starting
 * offset and let it take care of the rest.
 *
 * Note that if the @size is not aligned to the @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = drm_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	/* mm.size is the usable (possibly rounded) size; convert bytes to pages. */
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	/* Publish the manager only once it is fully initialized. */
	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}

/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
 * also be freed for us here.
 *
 * Return: 0 on success, negative error code on failure.
229 */ 230 int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type) 231 { 232 struct ttm_resource_manager *man = ttm_manager_type(bdev, type); 233 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 234 struct drm_buddy *mm = &bman->mm; 235 int ret; 236 237 ttm_resource_manager_set_used(man, false); 238 239 ret = ttm_resource_manager_evict_all(bdev, man); 240 if (ret) 241 return ret; 242 243 ttm_set_driver_manager(bdev, type, NULL); 244 245 mutex_lock(&bman->lock); 246 drm_buddy_free_list(mm, &bman->reserved); 247 drm_buddy_fini(mm); 248 mutex_unlock(&bman->lock); 249 250 ttm_resource_manager_cleanup(man); 251 kfree(bman); 252 253 return 0; 254 } 255 256 /** 257 * i915_ttm_buddy_man_reserve - Reserve address range 258 * @man: The buddy allocator ttm manager 259 * @start: The offset in bytes, where the region start is assumed to be zero 260 * @size: The size in bytes 261 * 262 * Note that the starting address for the region is always assumed to be zero. 263 * 264 * Return: 0 on success, negative error code on failure. 265 */ 266 int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man, 267 u64 start, u64 size) 268 { 269 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 270 struct drm_buddy *mm = &bman->mm; 271 int ret; 272 273 mutex_lock(&bman->lock); 274 ret = drm_buddy_alloc_range(mm, &bman->reserved, start, size); 275 mutex_unlock(&bman->lock); 276 277 return ret; 278 } 279 280