/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the Linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff this is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging.
 */
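/*
 * Example: the typical usage pattern sketched by the overview above. This is
 * an illustrative sketch only, not code from this file; the drm_example_*
 * names and the embedding structures are hypothetical, and error handling is
 * reduced to the bare minimum.
 *
 *      struct drm_example_vram {
 *              struct drm_mm mm;
 *      };
 *
 *      static int drm_example_alloc(struct drm_example_vram *vram,
 *                                   struct drm_mm_node *node, u64 size)
 *      {
 *              // Search for a hole; fills in node->start/size on success.
 *              return drm_mm_insert_node_generic(&vram->mm, node, size,
 *                                                0, 0,
 *                                                DRM_MM_SEARCH_DEFAULT,
 *                                                DRM_MM_CREATE_DEFAULT);
 *      }
 *
 *      // vram must be zeroed (e.g. kzalloc'ed) before drm_mm_init().
 *      drm_mm_init(&vram->mm, 0, vram_size);
 *      ...
 *      err = drm_example_alloc(vram, &bo->vram_node, bo->size);
 *      ...
 *      drm_mm_remove_node(&bo->vram_node);
 *      ...
 *      drm_mm_takedown(&vram->mm);
 */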
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                                      u64 size,
                                                      unsigned alignment,
                                                      unsigned long color,
                                                      enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                               u64 size,
                                                               unsigned alignment,
                                                               unsigned long color,
                                                               u64 start,
                                                               u64 end,
                                                               enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
        unsigned long entries[STACKDEPTH];
        struct stack_trace trace = {
                .entries = entries,
                .max_entries = STACKDEPTH,
                .skip = 1
        };

        save_stack_trace(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        /* May be called under spinlock, so avoid sleeping */
        node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
        struct drm_mm_node *node;
        unsigned long entries[STACKDEPTH];
        char *buf;

        buf = kmalloc(BUFSZ, GFP_KERNEL);
        if (!buf)
                return;

        list_for_each_entry(node, &mm->head_node.node_list, node_list) {
                struct stack_trace trace = {
                        .entries = entries,
                        .max_entries = STACKDEPTH
                };

                if (!node->stack) {
                        DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
                                  node->start, node->size);
                        continue;
                }

                depot_fetch_stack(node->stack, &trace);
                snprint_stack_trace(buf, BUFSZ, &trace, 0);
                DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
                          node->start, node->size, buf);
        }

        kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
                     u64, __subtree_last,
                     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
        return drm_mm_interval_tree_iter_first(&mm->interval_tree,
                                               start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);
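/*
 * Example: the iteration helpers mentioned in the overview, here used for a
 * simple debug dump. A rough sketch only; the "mm" pointer and the pr_info()
 * reporting are placeholders, not tied to any particular driver.
 *
 *      struct drm_mm_node *node;
 *      u64 hole_start, hole_end;
 *
 *      // Walk all allocated nodes, in address order.
 *      drm_mm_for_each_node(node, mm)
 *              pr_info("node %llx + %llx\n", node->start, node->size);
 *
 *      // Walk all holes, yielding the start and end of each free range.
 *      drm_mm_for_each_hole(node, mm, hole_start, hole_end)
 *              pr_info("hole %llx-%llx\n", hole_start, hole_end);
 */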
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                                          struct drm_mm_node *node)
{
        struct drm_mm *mm = hole_node->mm;
        struct rb_node **link, *rb;
        struct drm_mm_node *parent;

        node->__subtree_last = LAST(node);

        if (hole_node->allocated) {
                rb = &hole_node->rb;
                while (rb) {
                        parent = rb_entry(rb, struct drm_mm_node, rb);
                        if (parent->__subtree_last >= node->__subtree_last)
                                break;

                        parent->__subtree_last = node->__subtree_last;
                        rb = rb_parent(rb);
                }

                rb = &hole_node->rb;
                link = &hole_node->rb.rb_right;
        } else {
                rb = NULL;
                link = &mm->interval_tree.rb_node;
        }

        while (*link) {
                rb = *link;
                parent = rb_entry(rb, struct drm_mm_node, rb);
                if (parent->__subtree_last < node->__subtree_last)
                        parent->__subtree_last = node->__subtree_last;
                if (node->start < parent->start)
                        link = &parent->rb.rb_left;
                else
                        link = &parent->rb.rb_right;
        }

        rb_link_node(&node->rb, rb, link);
        rb_insert_augmented(&node->rb,
                            &mm->interval_tree,
                            &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 u64 size, unsigned alignment,
                                 unsigned long color,
                                 enum drm_mm_allocator_flags flags)
{
        struct drm_mm *mm = hole_node->mm;
        u64 hole_start = drm_mm_hole_node_start(hole_node);
        u64 hole_end = drm_mm_hole_node_end(hole_node);
        u64 adj_start = hole_start;
        u64 adj_end = hole_end;

        BUG_ON(node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (flags & DRM_MM_CREATE_TOP)
                adj_start = adj_end - size;

        if (alignment) {
                u64 tmp = adj_start;
                unsigned rem;

                rem = do_div(tmp, alignment);
                if (rem) {
                        if (flags & DRM_MM_CREATE_TOP)
                                adj_start -= rem;
                        else
                                adj_start += alignment - rem;
                }
        }

        BUG_ON(adj_start < hole_start);
        BUG_ON(adj_end > hole_end);

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        list_add(&node->node_list, &hole_node->node_list);

        drm_mm_interval_tree_add_node(hole_node, node);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }

        save_stack(node);
}
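/*
 * Example: drm_mm_insert_helper() above narrows the usable part of a hole
 * through mm->color_adjust before placing the node. The following is a
 * hypothetical sketch of such a callback, in the spirit of the guard-page use
 * mentioned in the overview; the callback name, the page-sized guard and the
 * color encoding are assumptions, not code from any driver.
 *
 *      static void example_color_adjust(struct drm_mm_node *node,
 *                                       unsigned long color,
 *                                       u64 *start, u64 *end)
 *      {
 *              // Shrink the hole so that nodes of a different color never
 *              // end up directly adjacent to @node.
 *              if (node->allocated && node->color != color)
 *                      *start += PAGE_SIZE;
 *              if (node->hole_follows) {
 *                      struct drm_mm_node *next =
 *                              list_next_entry(node, node_list);
 *                      if (next->allocated && next->color != color)
 *                              *end -= PAGE_SIZE;
 *              }
 *      }
 *
 *      mm->color_adjust = example_color_adjust;
 */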
/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        u64 end = node->start + node->size;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;
        u64 adj_start, adj_end;

        if (WARN_ON(node->size == 0))
                return -EINVAL;

        end = node->start + node->size;

        /* Find the relevant hole to add our node to */
        hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
                                               node->start, ~(u64)0);
        if (hole) {
                if (hole->start < end)
                        return -ENOSPC;
        } else {
                hole = list_entry(&mm->head_node.node_list,
                                  typeof(*hole), node_list);
        }

        hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
        if (!hole->hole_follows)
                return -ENOSPC;

        adj_start = hole_start = __drm_mm_hole_node_start(hole);
        adj_end = hole_end = __drm_mm_hole_node_end(hole);

        if (mm->color_adjust)
                mm->color_adjust(hole, node->color, &adj_start, &adj_end);

        if (adj_start > node->start || adj_end < end)
                return -ENOSPC;

        node->mm = mm;
        node->allocated = 1;

        list_add(&node->node_list, &hole->node_list);

        drm_mm_interval_tree_add_node(hole, node);

        if (node->start == hole_start) {
                hole->hole_follows = 0;
                list_del(&hole->hole_stack);
        }

        node->hole_follows = 0;
        if (end != hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }

        save_stack(node);

        return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
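/*
 * Example: reserving a range that is already in use, e.g. a firmware
 * framebuffer, before handing out the rest of the region. A hypothetical
 * sketch; "mm" is a placeholder pointer and fb_base/fb_size must describe a
 * range lying inside the area passed to drm_mm_init().
 *
 *      struct drm_mm_node fb_node = {};        // must be cleared to 0
 *
 *      fb_node.start = fb_base;
 *      fb_node.size = fb_size;
 *      fb_node.color = 0;
 *
 *      err = drm_mm_reserve_node(mm, &fb_node);
 *      // -ENOSPC here means the range is already occupied; a fresh
 *      // allocation with drm_mm_insert_node_generic() is the fallback.
 */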
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               u64 size, unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags sflags,
                               enum drm_mm_allocator_flags aflags)
{
        struct drm_mm_node *hole_node;

        if (WARN_ON(size == 0))
                return -EINVAL;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, sflags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       u64 size, unsigned alignment,
                                       unsigned long color,
                                       u64 start, u64 end,
                                       enum drm_mm_allocator_flags flags)
{
        struct drm_mm *mm = hole_node->mm;
        u64 hole_start = drm_mm_hole_node_start(hole_node);
        u64 hole_end = drm_mm_hole_node_end(hole_node);
        u64 adj_start = hole_start;
        u64 adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (flags & DRM_MM_CREATE_TOP)
                adj_start = adj_end - size;

        if (alignment) {
                u64 tmp = adj_start;
                unsigned rem;

                rem = do_div(tmp, alignment);
                if (rem) {
                        if (flags & DRM_MM_CREATE_TOP)
                                adj_start -= rem;
                        else
                                adj_start += alignment - rem;
                }
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        list_add(&node->node_list, &hole_node->node_list);

        drm_mm_interval_tree_add_node(hole_node, node);

        BUG_ON(node->start < start);
        BUG_ON(node->start < adj_start);
        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }

        save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        u64 size, unsigned alignment,
                                        unsigned long color,
                                        u64 start, u64 end,
                                        enum drm_mm_search_flags sflags,
                                        enum drm_mm_allocator_flags aflags)
{
        struct drm_mm_node *hole_node;

        if (WARN_ON(size == 0))
                return -EINVAL;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, sflags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end, aflags);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
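/*
 * Example: a range-restricted, aligned, top-down allocation. A hypothetical
 * sketch; the 256 MiB window and the 64 KiB alignment are made-up values,
 * not defaults of the allocator, and "mm"/"node" are placeholders.
 *
 *      err = drm_mm_insert_node_in_range_generic(mm, &node,
 *                                                size, SZ_64K, 0,
 *                                                0, SZ_256M,
 *                                                DRM_MM_SEARCH_BELOW,
 *                                                DRM_MM_CREATE_TOP);
 */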
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        if (WARN_ON(!node->allocated))
                return;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(__drm_mm_hole_node_start(node) ==
                       __drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(__drm_mm_hole_node_start(node) !=
                       __drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        drm_mm_interval_tree_remove(node, &mm->interval_tree);
        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                u64 tmp = start;
                unsigned rem;

                rem = do_div(tmp, alignment);
                if (rem)
                        start += alignment - rem;
        }

        return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                                      u64 size,
                                                      unsigned alignment,
                                                      unsigned long color,
                                                      enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        u64 adj_start;
        u64 adj_end;
        u64 best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
                u64 hole_size = adj_end - adj_start;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (hole_size < best_size) {
                        best = entry;
                        best_size = hole_size;
                }
        }

        return best;
}
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                               u64 size,
                                                               unsigned alignment,
                                                               unsigned long color,
                                                               u64 start,
                                                               u64 end,
                                                               enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        u64 adj_start;
        u64 adj_end;
        u64 best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
                u64 hole_size = adj_end - adj_start;

                if (adj_start < start)
                        adj_start = start;
                if (adj_end > end)
                        adj_end = end;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (hole_size < best_size) {
                        best = entry;
                        best_size = hole_size;
                }
        }

        return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;
        new->__subtree_last = old->__subtree_last;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */
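/*
 * Example: the scan workflow described above, condensed into a sketch. The
 * LRU list, the example_obj type with its embedded drm_mm_node and list
 * links, and the example_evict() helper are hypothetical; only the drm_mm_*
 * calls are real.
 *
 *      struct example_obj *obj, *next;
 *      LIST_HEAD(scan_list);
 *      LIST_HEAD(evict_list);
 *
 *      drm_mm_init_scan(mm, size, alignment, color);
 *
 *      // Step 1: feed LRU objects into the scan until a hole is found. If
 *      // the loop runs to completion no suitable hole exists; step 2 below
 *      // still has to unwind every block that was added.
 *      list_for_each_entry(obj, &lru, lru_link) {
 *              list_add(&obj->scan_link, &scan_list);
 *              if (drm_mm_scan_add_block(&obj->node))
 *                      break;
 *      }
 *
 *      // Step 2: back every object out again, in exactly the reverse order
 *      // of addition (scan_list is LIFO because of list_add() above).
 *      // drm_mm_scan_remove_block() flags the objects overlapping the hole.
 *      list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *              list_del(&obj->scan_link);
 *              if (drm_mm_scan_remove_block(&obj->node))
 *                      list_add(&obj->evict_link, &evict_list);
 *      }
 *
 *      // Step 3: only now, with the scan list empty again, evict the
 *      // flagged objects; freeing their nodes opens up the hole for the
 *      // allocation the scan was initialized for.
 *      list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *              list_del(&obj->evict_link);
 *              example_evict(obj);     // ends in drm_mm_remove_node()
 *      }
 */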
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      u64 size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 u64 size,
                                 unsigned alignment,
                                 unsigned long color,
                                 u64 start,
                                 u64 end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        u64 hole_start, hole_end;
        u64 adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return true;
        }

        return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        mm->scanned_blocks = 0;

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        mm->head_node.allocated = 0;
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->interval_tree = RB_ROOT;

        mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
        if (WARN(!list_empty(&mm->head_node.node_list),
                 "Memory manager not clean during takedown.\n"))
                show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
                             const char *prefix)
{
        u64 hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
                         hole_end, hole_size);
                return hole_size;
        }

        return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        u64 total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_debug_hole(&mm->head_node, prefix);

        drm_mm_for_each_node(entry, mm) {
                pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
                         entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_debug_hole(entry, prefix);
        }
        total = total_free + total_used;

        pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
                 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
        u64 hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
                           hole_end, hole_size);
                return hole_size;
        }

        return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        u64 total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_dump_hole(m, &mm->head_node);

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
                           entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;

        seq_printf(m, "total: %llu, used %llu free %llu\n", total,
                   total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
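/*
 * Example: exposing the allocator state through debugfs by wiring
 * drm_mm_dump_table() into a seq_file show callback. A sketch only; the
 * example_mm_show() name and the way the drm_mm pointer is looked up via the
 * drm_info_node private data are assumptions, not part of this file.
 *
 *      static int example_mm_show(struct seq_file *m, void *data)
 *      {
 *              struct drm_info_node *node = m->private;
 *              struct drm_mm *mm = node->info_ent->data;
 *
 *              return drm_mm_dump_table(m, mm);
 *      }
 */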