// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/slab.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_ttm_buddy_manager.h"

#include "i915_buddy.h"
#include "i915_gem.h"

struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager;
	struct i915_buddy_mm mm;
	struct list_head reserved;
	struct mutex lock;
	u64 default_page_size;
};

static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}

static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct i915_buddy_mm *mm = &bman->mm;
	unsigned long n_pages;
	unsigned int min_order;
	u64 min_page_size;
	u64 size;
	int err;

	GEM_BUG_ON(place->fpfn || place->lpfn);

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	GEM_BUG_ON(!bman_res->base.num_pages);
	size = bman_res->base.num_pages << PAGE_SHIFT;

	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mm->chunk_size);
	}

	if (size > mm->size) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mm->max_order);
		GEM_BUG_ON(order < min_order);

		do {
			mutex_lock(&bman->lock);
			block = i915_buddy_alloc(mm, order);
			mutex_unlock(&bman->lock);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				err = -ENOSPC;
				goto err_free_blocks;
			}
		} while (1);

		n_pages -= BIT(order);

		list_add_tail(&block->link, &bman_res->blocks);

		if (!n_pages)
			break;
	} while (1);

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	mutex_lock(&bman->lock);
	i915_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	kfree(bman_res);
	return err;
}

static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	i915_buddy_free_list(&bman->mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);

	kfree(bman_res);
}

static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
				     struct drm_printer *printer)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_buddy_block *block;

	mutex_lock(&bman->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   bman->default_page_size >> 10);

	i915_buddy_print(&bman->mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &bman->reserved, link)
		i915_buddy_block_print(&bman->mm, block, printer);
	mutex_unlock(&bman->lock);
}

static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
	.debug = i915_ttm_buddy_man_debug,
};

/**
 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @default_page_size: The default minimum page size in bytes for allocations.
 * This must be at least as large as @chunk_size, and can be overridden by
 * setting the BO page_alignment, to be larger or smaller as needed.
 * @chunk_size: The minimum page size in bytes for our allocations, i.e.
 * order-zero
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property that allocated blocks have natural
 * power-of-two alignment. So long as the real starting address is some large
 * power-of-two, or naturally starts from zero, this should be fine. Also,
 * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
 * if, say, there is some unusable range at the start of the region. We can
 * revisit this in the future and make the interface accept an actual starting
 * offset and let it take care of the rest.
 *
 * Note that if the @size is not aligned to the @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = i915_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}

/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
 * also be freed for us here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_buddy_mm *mm = &bman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	i915_buddy_free_list(mm, &bman->reserved);
	i915_buddy_fini(mm);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}

/**
 * i915_ttm_buddy_man_reserve - Reserve address range
 * @man: The buddy allocator ttm manager
 * @start: The offset in bytes, where the region start is assumed to be zero
 * @size: The size in bytes
 *
 * Note that the starting address for the region is always assumed to be zero.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_buddy_mm *mm = &bman->mm;
	int ret;

	mutex_lock(&bman->lock);
	ret = i915_buddy_alloc_range(mm, &bman->reserved, start, size);
	mutex_unlock(&bman->lock);

	return ret;
}
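
/*
 * Illustrative usage sketch (not part of the driver): roughly how a memory
 * region could wire up this manager. The memory type, sizes and reserved
 * range below are hypothetical placeholders, not taken from the real i915
 * region setup code.
 *
 *	err = i915_ttm_buddy_man_init(bdev, TTM_PL_VRAM, false,
 *				      region_size, SZ_64K, PAGE_SIZE);
 *	if (err)
 *		return err;
 *
 *	(Optionally carve out an unusable range at the start of the region.)
 *	err = i915_ttm_buddy_man_reserve(ttm_manager_type(bdev, TTM_PL_VRAM),
 *					 0, reserved_size);
 *
 *	(On teardown, this also frees anything reserved above.)
 *	err = i915_ttm_buddy_man_fini(bdev, TTM_PL_VRAM);
 */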