/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which means it is easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, and allocations are tracked in
 * &drm_mm_node. Drivers are free to embed either of them into their own
 * suitable data structures. drm_mm itself will not do any allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode-setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted at any time after the allocator is initialized, which helps avoid
 * looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since graphics
 * thrashing is a fairly steep cliff anyway, this is not a real concern.
 * Removing a node again is O(1).
 *
 * drm_mm supports a few features: alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which, in conjunction with a driver callback, can
 * be used to implement sophisticated placement restrictions. The i915 DRM
 * driver uses this to implement guard pages between incompatible caching
 * domains in the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */
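
/*
 * A minimal usage sketch (illustrative only, not taken from any driver; the
 * foo_* names are assumptions). The &drm_mm is typically embedded in a
 * driver-private structure and the &drm_mm_node in each buffer object:
 *
 *	struct foo_vram {
 *		struct drm_mm mm;
 *	};
 *
 *	struct foo_bo {
 *		struct drm_mm_node node;
 *		u64 size;
 *	};
 *
 *	static int foo_bo_pin(struct foo_vram *vram, struct foo_bo *bo)
 *	{
 *		// The node must be cleared to 0 before insertion.
 *		memset(&bo->node, 0, sizeof(bo->node));
 *		return drm_mm_insert_node_generic(&vram->mm, &bo->node,
 *						  bo->size, 0, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 *	static void foo_bo_unpin(struct foo_bo *bo)
 *	{
 *		drm_mm_remove_node(&bo->node);
 *	}
 */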

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						       u64 size,
						       unsigned alignment,
						       unsigned long color,
						       enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		/*
		 * Honour the requested alignment: round the start up, or down
		 * for top-down allocation, inside the hole.
		 */
		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	/*
	 * The new node starts exactly at the hole start, so no hole follows
	 * hole_node anymore.
	 */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	end = node->start + node->size;

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
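
/*
 * A sketch of how drm_mm_reserve_node() is typically used (illustrative only;
 * the names and the source of the firmware scanout range are assumptions):
 *
 *	struct drm_mm_node *fb_node;
 *	int ret;
 *
 *	fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
 *	if (!fb_node)
 *		return -ENOMEM;
 *
 *	// Range currently scanned out by the firmware, in allocator units.
 *	fb_node->start = firmware_fb_offset;
 *	fb_node->size = firmware_fb_size;
 *
 *	ret = drm_mm_reserve_node(&vram->mm, fb_node);
 *	if (ret)	// -ENOSPC: the range is already in use
 *		kfree(fb_node);
 */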

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	/* Clip the hole to the allowed range before any other adjustment. */
	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
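
/*
 * A sketch of a range-restricted allocation (illustrative only; the mappable
 * limit and the names are assumptions). This would place a page-aligned node
 * inside the first mappable_end bytes, searching from the top of that range:
 *
 *	ret = drm_mm_insert_node_in_range_generic(&vram->mm, &bo->node,
 *						  bo->size, PAGE_SIZE, 0,
 *						  0, mappable_end,
 *						  DRM_MM_SEARCH_BELOW,
 *						  DRM_MM_CREATE_TOP);
 */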

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));


	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0ULL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
								u64 size,
								unsigned alignment,
								unsigned long color,
								u64 start,
								u64 end,
								enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0ULL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
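
/*
 * A sketch of how drm_mm_replace_node() might be used when one object takes
 * over another object's range (illustrative only; the foo_bo type is an
 * assumption carried over from the earlier examples):
 *
 *	static void foo_bo_transfer_range(struct foo_bo *old_bo,
 *					  struct foo_bo *new_bo)
 *	{
 *		// new_bo->node inherits old_bo->node's allocation; the start
 *		// offset is guaranteed to stay the same.
 *		drm_mm_replace_node(&old_bo->node, &new_bo->node);
 *	}
 */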

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not very
 * efficient to simply pick objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects, or for nodes that otherwise have
 * special allocation constraints, there's a good chance we would evict lots
 * of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * The driver must then walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So, like the free stack which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */
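
/*
 * A sketch of an eviction loop built on the scan interfaces (illustrative
 * only; the foo_* names, the LRU list and the locking are assumptions):
 *
 *	struct foo_bo *bo, *tmp;
 *	LIST_HEAD(scan_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(&vram->mm, size, alignment, 0);
 *
 *	// Add objects in LRU order until a suitable hole turns up.
 *	list_for_each_entry(bo, &vram->lru, lru_link) {
 *		list_add(&bo->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&bo->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// Walk the roster again to restore the allocator state. Because
 *	// list_add() prepends, walking scan_list from its head visits the
 *	// objects in reverse order of addition, as required. Remember which
 *	// objects the scan actually selected for eviction.
 *	list_for_each_entry_safe(bo, tmp, &scan_list, scan_link) {
 *		bool evict = drm_mm_scan_remove_block(&bo->node);
 *		list_del(&bo->scan_link);
 *		if (found && evict)
 *			foo_bo_evict(bo); // frees bo->node via drm_mm_remove_node()
 *	}
 */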

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
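
/*
 * Range-restricted scans work exactly like the eviction sketch above; only
 * the initialization differs (illustrative, reusing the assumed names):
 *
 *	drm_mm_init_scan_with_range(&vram->mm, size, alignment, 0,
 *				    0, mappable_end);
 */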

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed from the scan list in the reverse order in which
 * they have been added (see the lru scan roster overview above), otherwise
 * the internal state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/*
	 * Clever trick to avoid a special case in the free hole tracking: the
	 * head_node is a sentinel whose start and size are chosen so that the
	 * hole following it covers exactly the managed range
	 * [start, start + size).
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
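
/*
 * A minimal initialization sketch (illustrative only; foo_vram and vram_size
 * are assumptions from the earlier examples). @mm must be zeroed before
 * drm_mm_init(), which kzalloc() of the containing structure takes care of:
 *
 *	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
 *	if (!vram)
 *		return -ENOMEM;
 *	drm_mm_init(&vram->mm, 0, vram_size);
 *	...
 *	// At teardown all nodes must already have been removed.
 *	drm_mm_takedown(&vram->mm);
 *	kfree(vram);
 */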

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
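
/*
 * A sketch of hooking drm_mm_dump_table() up to a debugfs file (illustrative
 * only; foo_vram and how the file is registered are assumptions). The show
 * callback would be wired up with single_open(), which makes the data pointer
 * available as m->private; the walk must happen under the driver's own lock:
 *
 *	static int foo_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct foo_vram *vram = m->private;
 *
 *		return drm_mm_dump_table(m, &vram->mm);
 *	}
 */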