1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021 Intel Corporation 4 */ 5 6 #include <linux/slab.h> 7 8 #include <drm/ttm/ttm_bo_driver.h> 9 #include <drm/ttm/ttm_placement.h> 10 11 #include <drm/drm_buddy.h> 12 13 #include "i915_ttm_buddy_manager.h" 14 15 #include "i915_gem.h" 16 17 struct i915_ttm_buddy_manager { 18 struct ttm_resource_manager manager; 19 struct drm_buddy mm; 20 struct list_head reserved; 21 struct mutex lock; 22 unsigned long visible_size; 23 unsigned long visible_avail; 24 unsigned long visible_reserved; 25 u64 default_page_size; 26 }; 27 28 static struct i915_ttm_buddy_manager * 29 to_buddy_manager(struct ttm_resource_manager *man) 30 { 31 return container_of(man, struct i915_ttm_buddy_manager, manager); 32 } 33 34 static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, 35 struct ttm_buffer_object *bo, 36 const struct ttm_place *place, 37 struct ttm_resource **res) 38 { 39 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 40 struct i915_ttm_buddy_resource *bman_res; 41 struct drm_buddy *mm = &bman->mm; 42 unsigned long n_pages, lpfn; 43 u64 min_page_size; 44 u64 size; 45 int err; 46 47 lpfn = place->lpfn; 48 if (!lpfn) 49 lpfn = man->size; 50 51 bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL); 52 if (!bman_res) 53 return -ENOMEM; 54 55 ttm_resource_init(bo, place, &bman_res->base); 56 INIT_LIST_HEAD(&bman_res->blocks); 57 bman_res->mm = mm; 58 59 if (place->flags & TTM_PL_FLAG_TOPDOWN) 60 bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; 61 62 if (place->fpfn || lpfn != man->size) 63 bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION; 64 65 GEM_BUG_ON(!bman_res->base.num_pages); 66 size = bman_res->base.num_pages << PAGE_SHIFT; 67 68 min_page_size = bman->default_page_size; 69 if (bo->page_alignment) 70 min_page_size = bo->page_alignment << PAGE_SHIFT; 71 72 GEM_BUG_ON(min_page_size < mm->chunk_size); 73 GEM_BUG_ON(!IS_ALIGNED(size, min_page_size)); 74 75 if (place->fpfn + bman_res->base.num_pages != place->lpfn && 76 place->flags & TTM_PL_FLAG_CONTIGUOUS) { 77 unsigned long pages; 78 79 size = roundup_pow_of_two(size); 80 min_page_size = size; 81 82 pages = size >> ilog2(mm->chunk_size); 83 if (pages > lpfn) 84 lpfn = pages; 85 } 86 87 if (size > lpfn << PAGE_SHIFT) { 88 err = -E2BIG; 89 goto err_free_res; 90 } 91 92 n_pages = size >> ilog2(mm->chunk_size); 93 94 mutex_lock(&bman->lock); 95 if (lpfn <= bman->visible_size && n_pages > bman->visible_avail) { 96 mutex_unlock(&bman->lock); 97 err = -ENOSPC; 98 goto err_free_res; 99 } 100 101 err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, 102 (u64)lpfn << PAGE_SHIFT, 103 (u64)n_pages << PAGE_SHIFT, 104 min_page_size, 105 &bman_res->blocks, 106 bman_res->flags); 107 if (unlikely(err)) 108 goto err_free_blocks; 109 110 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { 111 u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT; 112 113 drm_buddy_block_trim(mm, 114 original_size, 115 &bman_res->blocks); 116 } 117 118 if (lpfn <= bman->visible_size) { 119 bman_res->used_visible_size = bman_res->base.num_pages; 120 } else { 121 struct drm_buddy_block *block; 122 123 list_for_each_entry(block, &bman_res->blocks, link) { 124 unsigned long start = 125 drm_buddy_block_offset(block) >> PAGE_SHIFT; 126 127 if (start < bman->visible_size) { 128 unsigned long end = start + 129 (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 130 131 bman_res->used_visible_size += 132 min(end, bman->visible_size) - start; 133 } 134 } 135 } 136 137 if (bman_res->used_visible_size) 138 bman->visible_avail -= bman_res->used_visible_size; 139 140 mutex_unlock(&bman->lock); 141 142 if (place->lpfn - place->fpfn == n_pages) 143 bman_res->base.start = place->fpfn; 144 else if (lpfn <= bman->visible_size) 145 bman_res->base.start = 0; 146 else 147 bman_res->base.start = bman->visible_size; 148 149 *res = &bman_res->base; 150 return 0; 151 152 err_free_blocks: 153 drm_buddy_free_list(mm, &bman_res->blocks); 154 mutex_unlock(&bman->lock); 155 err_free_res: 156 ttm_resource_fini(man, &bman_res->base); 157 kfree(bman_res); 158 return err; 159 } 160 161 static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man, 162 struct ttm_resource *res) 163 { 164 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); 165 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 166 167 mutex_lock(&bman->lock); 168 drm_buddy_free_list(&bman->mm, &bman_res->blocks); 169 bman->visible_avail += bman_res->used_visible_size; 170 mutex_unlock(&bman->lock); 171 172 ttm_resource_fini(man, res); 173 kfree(bman_res); 174 } 175 176 static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man, 177 struct ttm_resource *res, 178 const struct ttm_place *place, 179 size_t size) 180 { 181 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); 182 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 183 struct drm_buddy *mm = &bman->mm; 184 struct drm_buddy_block *block; 185 186 if (!place->fpfn && !place->lpfn) 187 return true; 188 189 GEM_BUG_ON(!place->lpfn); 190 191 /* 192 * If we just want something mappable then we can quickly check 193 * if the current victim resource is using any of the CPU 194 * visible portion. 195 */ 196 if (!place->fpfn && 197 place->lpfn == i915_ttm_buddy_man_visible_size(man)) 198 return bman_res->used_visible_size > 0; 199 200 /* Check each drm buddy block individually */ 201 list_for_each_entry(block, &bman_res->blocks, link) { 202 unsigned long fpfn = 203 drm_buddy_block_offset(block) >> PAGE_SHIFT; 204 unsigned long lpfn = fpfn + 205 (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 206 207 if (place->fpfn < lpfn && place->lpfn > fpfn) 208 return true; 209 } 210 211 return false; 212 } 213 214 static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man, 215 struct ttm_resource *res, 216 const struct ttm_place *place, 217 size_t size) 218 { 219 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); 220 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 221 struct drm_buddy *mm = &bman->mm; 222 struct drm_buddy_block *block; 223 224 if (!place->fpfn && !place->lpfn) 225 return true; 226 227 GEM_BUG_ON(!place->lpfn); 228 229 if (!place->fpfn && 230 place->lpfn == i915_ttm_buddy_man_visible_size(man)) 231 return bman_res->used_visible_size == res->num_pages; 232 233 /* Check each drm buddy block individually */ 234 list_for_each_entry(block, &bman_res->blocks, link) { 235 unsigned long fpfn = 236 drm_buddy_block_offset(block) >> PAGE_SHIFT; 237 unsigned long lpfn = fpfn + 238 (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 239 240 if (fpfn < place->fpfn || lpfn > place->lpfn) 241 return false; 242 } 243 244 return true; 245 } 246 247 static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man, 248 struct drm_printer *printer) 249 { 250 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 251 struct drm_buddy_block *block; 252 253 mutex_lock(&bman->lock); 254 drm_printf(printer, "default_page_size: %lluKiB\n", 255 bman->default_page_size >> 10); 256 drm_printf(printer, "visible_avail: %lluMiB\n", 257 (u64)bman->visible_avail << PAGE_SHIFT >> 20); 258 drm_printf(printer, "visible_size: %lluMiB\n", 259 (u64)bman->visible_size << PAGE_SHIFT >> 20); 260 drm_printf(printer, "visible_reserved: %lluMiB\n", 261 (u64)bman->visible_reserved << PAGE_SHIFT >> 20); 262 263 drm_buddy_print(&bman->mm, printer); 264 265 drm_printf(printer, "reserved:\n"); 266 list_for_each_entry(block, &bman->reserved, link) 267 drm_buddy_block_print(&bman->mm, block, printer); 268 mutex_unlock(&bman->lock); 269 } 270 271 static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = { 272 .alloc = i915_ttm_buddy_man_alloc, 273 .free = i915_ttm_buddy_man_free, 274 .intersects = i915_ttm_buddy_man_intersects, 275 .compatible = i915_ttm_buddy_man_compatible, 276 .debug = i915_ttm_buddy_man_debug, 277 }; 278 279 /** 280 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager 281 * @bdev: The ttm device 282 * @type: Memory type we want to manage 283 * @use_tt: Set use_tt for the manager 284 * @size: The size in bytes to manage 285 * @visible_size: The CPU visible size in bytes to manage 286 * @default_page_size: The default minimum page size in bytes for allocations, 287 * this must be at least as large as @chunk_size, and can be overridden by 288 * setting the BO page_alignment, to be larger or smaller as needed. 289 * @chunk_size: The minimum page size in bytes for our allocations i.e 290 * order-zero 291 * 292 * Note that the starting address is assumed to be zero here, since this 293 * simplifies keeping the property where allocated blocks having natural 294 * power-of-two alignment. So long as the real starting address is some large 295 * power-of-two, or naturally start from zero, then this should be fine. Also 296 * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment 297 * if say there is some unusable range from the start of the region. We can 298 * revisit this in the future and make the interface accept an actual starting 299 * offset and let it take care of the rest. 300 * 301 * Note that if the @size is not aligned to the @chunk_size then we perform the 302 * required rounding to get the usable size. The final size in pages can be 303 * taken from &ttm_resource_manager.size. 304 * 305 * Return: 0 on success, negative error code on failure. 306 */ 307 int i915_ttm_buddy_man_init(struct ttm_device *bdev, 308 unsigned int type, bool use_tt, 309 u64 size, u64 visible_size, u64 default_page_size, 310 u64 chunk_size) 311 { 312 struct ttm_resource_manager *man; 313 struct i915_ttm_buddy_manager *bman; 314 int err; 315 316 bman = kzalloc(sizeof(*bman), GFP_KERNEL); 317 if (!bman) 318 return -ENOMEM; 319 320 err = drm_buddy_init(&bman->mm, size, chunk_size); 321 if (err) 322 goto err_free_bman; 323 324 mutex_init(&bman->lock); 325 INIT_LIST_HEAD(&bman->reserved); 326 GEM_BUG_ON(default_page_size < chunk_size); 327 bman->default_page_size = default_page_size; 328 bman->visible_size = visible_size >> PAGE_SHIFT; 329 bman->visible_avail = bman->visible_size; 330 331 man = &bman->manager; 332 man->use_tt = use_tt; 333 man->func = &i915_ttm_buddy_manager_func; 334 ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT); 335 336 ttm_resource_manager_set_used(man, true); 337 ttm_set_driver_manager(bdev, type, man); 338 339 return 0; 340 341 err_free_bman: 342 kfree(bman); 343 return err; 344 } 345 346 /** 347 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager 348 * @bdev: The ttm device 349 * @type: Memory type we want to manage 350 * 351 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will 352 * also be freed for us here. 353 * 354 * Return: 0 on success, negative error code on failure. 355 */ 356 int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type) 357 { 358 struct ttm_resource_manager *man = ttm_manager_type(bdev, type); 359 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 360 struct drm_buddy *mm = &bman->mm; 361 int ret; 362 363 ttm_resource_manager_set_used(man, false); 364 365 ret = ttm_resource_manager_evict_all(bdev, man); 366 if (ret) 367 return ret; 368 369 ttm_set_driver_manager(bdev, type, NULL); 370 371 mutex_lock(&bman->lock); 372 drm_buddy_free_list(mm, &bman->reserved); 373 drm_buddy_fini(mm); 374 bman->visible_avail += bman->visible_reserved; 375 WARN_ON_ONCE(bman->visible_avail != bman->visible_size); 376 mutex_unlock(&bman->lock); 377 378 ttm_resource_manager_cleanup(man); 379 kfree(bman); 380 381 return 0; 382 } 383 384 /** 385 * i915_ttm_buddy_man_reserve - Reserve address range 386 * @man: The buddy allocator ttm manager 387 * @start: The offset in bytes, where the region start is assumed to be zero 388 * @size: The size in bytes 389 * 390 * Note that the starting address for the region is always assumed to be zero. 391 * 392 * Return: 0 on success, negative error code on failure. 393 */ 394 int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man, 395 u64 start, u64 size) 396 { 397 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 398 struct drm_buddy *mm = &bman->mm; 399 unsigned long fpfn = start >> PAGE_SHIFT; 400 unsigned long flags = 0; 401 int ret; 402 403 flags |= DRM_BUDDY_RANGE_ALLOCATION; 404 405 mutex_lock(&bman->lock); 406 ret = drm_buddy_alloc_blocks(mm, start, 407 start + size, 408 size, mm->chunk_size, 409 &bman->reserved, 410 flags); 411 412 if (fpfn < bman->visible_size) { 413 unsigned long lpfn = fpfn + (size >> PAGE_SHIFT); 414 unsigned long visible = min(lpfn, bman->visible_size) - fpfn; 415 416 bman->visible_reserved += visible; 417 bman->visible_avail -= visible; 418 } 419 mutex_unlock(&bman->lock); 420 421 return ret; 422 } 423 424 /** 425 * i915_ttm_buddy_man_visible_size - Return the size of the CPU visible portion 426 * in pages. 427 * @man: The buddy allocator ttm manager 428 */ 429 u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man) 430 { 431 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 432 433 return bman->visible_size; 434 } 435 436 /** 437 * i915_ttm_buddy_man_avail - Query the avail tracking for the manager. 438 * 439 * @man: The buddy allocator ttm manager 440 * @avail: The total available memory in pages for the entire manager. 441 * @visible_avail: The total available memory in pages for the CPU visible 442 * portion. Note that this will always give the same value as @avail on 443 * configurations that don't have a small BAR. 444 */ 445 void i915_ttm_buddy_man_avail(struct ttm_resource_manager *man, 446 u64 *avail, u64 *visible_avail) 447 { 448 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 449 450 mutex_lock(&bman->lock); 451 *avail = bman->mm.avail >> PAGE_SHIFT; 452 *visible_avail = bman->visible_avail; 453 mutex_unlock(&bman->lock); 454 } 455 456 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 457 void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man, 458 u64 size) 459 { 460 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 461 462 bman->visible_size = size; 463 } 464 #endif 465