// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/slab.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include <drm/drm_buddy.h>

#include "i915_ttm_buddy_manager.h"

#include "i915_gem.h"

/*
 * struct i915_ttm_buddy_manager - TTM resource manager backed by a drm_buddy
 * allocator.
 * @manager: Base TTM resource manager; to_buddy_manager() upcasts from this
 *	embedded member via container_of().
 * @mm: The drm_buddy allocator managing the underlying address space.
 * @reserved: Blocks carved out via i915_ttm_buddy_man_reserve(); freed back
 *	to @mm on fini.
 * @lock: Protects @mm, @reserved and the visible_* counters below.
 * @visible_size: Size of the CPU visible portion, in pages.
 * @visible_avail: Pages still available in the CPU visible portion.
 * @visible_reserved: Pages of the CPU visible portion consumed by
 *	i915_ttm_buddy_man_reserve().
 * @default_page_size: Default minimum page size in bytes for allocations;
 *	overridden per-BO by a non-zero bo->page_alignment.
 */
struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager;
	struct drm_buddy mm;
	struct list_head reserved;
	struct mutex lock;
	unsigned long visible_size;
	unsigned long visible_avail;
	unsigned long visible_reserved;
	u64 default_page_size;
};

/* Upcast from the embedded base manager to our buddy manager. */
static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}

/*
 * Allocate backing store for @bo from the buddy allocator, honouring the
 * placement restrictions in @place (range, topdown, contiguous), and track
 * how much of the CPU visible portion the allocation consumes.
 *
 * Returns 0 and sets *@res on success, -E2BIG if the request can't fit in
 * the allowed range, -ENOSPC if the visible portion is exhausted, -ENOMEM on
 * allocation failure, or the error from drm_buddy_alloc_blocks().
 */
static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct drm_buddy *mm = &bman->mm;
	unsigned long n_pages, lpfn;
	u64 min_page_size;
	u64 size;
	int err;

	/* lpfn == 0 means "no upper limit": use the whole managed range. */
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	/* Only use a ranged allocation if the placement is restricted. */
	if (place->fpfn || lpfn != man->size)
		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	GEM_BUG_ON(!bman_res->base.num_pages);
	size = bman_res->base.num_pages << PAGE_SHIFT;

	/* The BO page_alignment, when set, overrides the manager default. */
	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		unsigned long pages;

		/*
		 * Request one power-of-two block covering the whole size;
		 * the overshoot is trimmed back after allocation. Widen lpfn
		 * if the rounded-up size no longer fits the limit.
		 */
		size = roundup_pow_of_two(size);
		min_page_size = size;

		pages = size >> ilog2(mm->chunk_size);
		if (pages > lpfn)
			lpfn = pages;
	}

	if (size > lpfn << PAGE_SHIFT) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	mutex_lock(&bman->lock);
	if (lpfn <= bman->visible_size && n_pages > bman->visible_avail) {
		/*
		 * The allocation must come entirely from the CPU visible
		 * portion, but not enough of it is left.
		 */
		mutex_unlock(&bman->lock);
		err = -ENOSPC;
		goto err_free_res;
	}

	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
				     (u64)lpfn << PAGE_SHIFT,
				     (u64)n_pages << PAGE_SHIFT,
				     min_page_size,
				     &bman_res->blocks,
				     bman_res->flags);
	mutex_unlock(&bman->lock);
	if (unlikely(err))
		goto err_free_blocks;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;

		/* Give back the tail of the rounded-up contiguous block. */
		mutex_lock(&bman->lock);
		drm_buddy_block_trim(mm,
				     original_size,
				     &bman_res->blocks);
		mutex_unlock(&bman->lock);
	}

	if (lpfn <= bman->visible_size) {
		/* Entire allocation sits inside the CPU visible portion. */
		bman_res->used_visible_size = bman_res->base.num_pages;
	} else {
		struct drm_buddy_block *block;

		/*
		 * Sum the part of each block that overlaps the CPU visible
		 * portion.
		 */
		list_for_each_entry(block, &bman_res->blocks, link) {
			unsigned long start =
				drm_buddy_block_offset(block) >> PAGE_SHIFT;

			if (start < bman->visible_size) {
				unsigned long end = start +
					(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

				bman_res->used_visible_size +=
					min(end, bman->visible_size) - start;
			}
		}
	}

	if (bman_res->used_visible_size) {
		mutex_lock(&bman->lock);
		bman->visible_avail -= bman_res->used_visible_size;
		mutex_unlock(&bman->lock);
	}

	/*
	 * NOTE(review): base.start looks like a synthetic placement hint
	 * here (an exact range fill starts at fpfn, otherwise 0 or
	 * visible_size depending on visibility) — the real offsets live in
	 * the buddy blocks. Confirm against the TTM consumers of start.
	 */
	if (place->lpfn - place->fpfn == n_pages)
		bman_res->base.start = place->fpfn;
	else if (lpfn <= bman->visible_size)
		bman_res->base.start = 0;
	else
		bman_res->base.start = bman->visible_size;

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	ttm_resource_fini(man, &bman_res->base);
	kfree(bman_res);
	return err;
}

/*
 * Release a resource previously handed out by i915_ttm_buddy_man_alloc(),
 * returning its blocks to the buddy and its pages to the visible budget.
 */
static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(&bman->mm, &bman_res->blocks);
	/* Return any CPU visible pages this resource was using. */
	bman->visible_avail += bman_res->used_visible_size;
	mutex_unlock(&bman->lock);

	ttm_resource_fini(man, res);
	kfree(bman_res);
}

/* Dump manager state, buddy state and reserved blocks to @printer. */
static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
				     struct drm_printer *printer)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy_block *block;

	mutex_lock(&bman->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   bman->default_page_size >> 10);
	drm_printf(printer, "visible_avail: %lluMiB\n",
		   (u64)bman->visible_avail << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_size: %lluMiB\n",
		   (u64)bman->visible_size << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_reserved: %lluMiB\n",
		   (u64)bman->visible_reserved << PAGE_SHIFT >> 20);

	drm_buddy_print(&bman->mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &bman->reserved, link)
		drm_buddy_block_print(&bman->mm, block, printer);
	mutex_unlock(&bman->lock);
}

static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
	.debug = i915_ttm_buddy_man_debug,
};

/**
 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @visible_size: The CPU visible size in bytes to manage
 * @default_page_size: The default minimum page size in bytes for allocations,
 * this must be at least as large as @chunk_size, and can be overridden by
 * setting the BO page_alignment, to be larger or smaller as needed.
 * @chunk_size: The minimum page size in bytes for our allocations i.e
 * order-zero
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property where allocated blocks having natural
 * power-of-two alignment. So long as the real starting address is some large
 * power-of-two, or naturally start from zero, then this should be fine. Also
 * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
 * if say there is some unusable range from the start of the region. We can
 * revisit this in the future and make the interface accept an actual starting
 * offset and let it take care of the rest.
 *
 * Note that if the @size is not aligned to the @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 visible_size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = drm_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;
	/* visible_* bookkeeping is in pages, not bytes. */
	bman->visible_size = visible_size >> PAGE_SHIFT;
	bman->visible_avail = bman->visible_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}

/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
 * also be freed for us here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	int ret;

	/*
	 * Teardown order matters: stop new allocations first, then evict
	 * everything still resident before tearing down the allocator.
	 */
	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman->reserved);
	drm_buddy_fini(mm);
	/*
	 * With all resources freed and the reserved ranges returned, the
	 * visible budget must be whole again — anything else means a
	 * bookkeeping leak somewhere.
	 */
	bman->visible_avail += bman->visible_reserved;
	WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}

/**
 * i915_ttm_buddy_man_reserve - Reserve address range
 * @man: The buddy allocator ttm manager
 * @start: The offset in bytes, where the region start is assumed to be zero
 * @size: The size in bytes
 *
 * Note that the starting address for the region is always assumed to be zero.
 *
 * Return: 0 on success, negative error code on failure.
323 */ 324 int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man, 325 u64 start, u64 size) 326 { 327 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 328 struct drm_buddy *mm = &bman->mm; 329 unsigned long fpfn = start >> PAGE_SHIFT; 330 unsigned long flags = 0; 331 int ret; 332 333 flags |= DRM_BUDDY_RANGE_ALLOCATION; 334 335 mutex_lock(&bman->lock); 336 ret = drm_buddy_alloc_blocks(mm, start, 337 start + size, 338 size, mm->chunk_size, 339 &bman->reserved, 340 flags); 341 342 if (fpfn < bman->visible_size) { 343 unsigned long lpfn = fpfn + (size >> PAGE_SHIFT); 344 unsigned long visible = min(lpfn, bman->visible_size) - fpfn; 345 346 bman->visible_reserved += visible; 347 bman->visible_avail -= visible; 348 } 349 mutex_unlock(&bman->lock); 350 351 return ret; 352 } 353 354 /** 355 * i915_ttm_buddy_man_visible_size - Return the size of the CPU visible portion 356 * in pages. 357 * @man: The buddy allocator ttm manager 358 */ 359 u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man) 360 { 361 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 362 363 return bman->visible_size; 364 } 365 366 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 367 void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man, 368 u64 size) 369 { 370 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 371 372 bman->visible_size = size; 373 } 374 #endif 375