/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep performance cliff anyway this is not a real concern.
 * Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
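/*
 * Illustrative driver-side sketch (struct foo_gtt and the foo_gtt_*() helpers
 * below are hypothetical, not part of drm_mm): the allocator and its nodes are
 * typically embedded in driver structures, modifications are guarded by the
 * driver's own lock, and space is handed out and returned with
 * drm_mm_insert_node_in_range() and drm_mm_remove_node(). The node passed to
 * foo_gtt_alloc() is assumed to have been cleared to 0 by the caller.
 *
 *	struct foo_gtt {
 *		struct drm_mm mm;
 *		struct mutex lock;
 *	};
 *
 *	static void foo_gtt_init(struct foo_gtt *gtt, u64 start, u64 size)
 *	{
 *		mutex_init(&gtt->lock);
 *		drm_mm_init(&gtt->mm, start, size);
 *	}
 *
 *	static int foo_gtt_alloc(struct foo_gtt *gtt,
 *				 struct drm_mm_node *node, u64 size)
 *	{
 *		int ret;
 *
 *		mutex_lock(&gtt->lock);
 *		ret = drm_mm_insert_node_in_range(&gtt->mm, node, size, 0, 0,
 *						  0, U64_MAX,
 *						  DRM_MM_INSERT_BEST);
 *		mutex_unlock(&gtt->lock);
 *		return ret;
 *	}
 *
 *	static void foo_gtt_free(struct foo_gtt *gtt, struct drm_mm_node *node)
 *	{
 *		mutex_lock(&gtt->lock);
 *		drm_mm_remove_node(node);
 *		mutex_unlock(&gtt->lock);
 *	}
 */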
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
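/*
 * Illustrative sketch: the interval tree above backs range queries such as
 * drm_mm_for_each_node_in_range() from drm_mm.h. Checking whether anything
 * still occupies a range could look roughly like this (foo_range_busy() is a
 * hypothetical helper):
 *
 *	static bool foo_range_busy(const struct drm_mm *mm, u64 start, u64 end)
 *	{
 *		struct drm_mm_node *node;
 *
 *		drm_mm_for_each_node_in_range(node, mm, start, end)
 *			return true;
 *
 *		return false;
 *	}
 */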
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost = true;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *best = NULL;
	struct rb_node **link = &mm->holes_size.rb_node;

	while (*link) {
		struct rb_node *rb = *link;

		if (size <= rb_hole_size(rb)) {
			link = &rb->rb_left;
			best = rb;
		} else {
			link = &rb->rb_right;
		}
	}

	return rb_hole_size_to_node(best);
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct drm_mm_node *node = NULL;
	struct rb_node **link = &mm->holes_addr.rb_node;

	while (*link) {
		u64 hole_start;

		node = rb_hole_addr_to_node(*link);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			link = &node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			link = &node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	if (RB_EMPTY_ROOT(&mm->holes_size))
		return NULL;

	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
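/*
 * Illustrative sketch of the takeover case mentioned in the comment above
 * (foo_reserve_firmware_fb(), fb_base and fb_size are hypothetical): only
 * start, size and color are set, everything else stays zeroed, and the
 * reservation fails with -ENOSPC if the range is no longer free.
 *
 *	static int foo_reserve_firmware_fb(struct drm_mm *mm,
 *					   struct drm_mm_node *node,
 *					   u64 fb_base, u64 fb_size)
 *	{
 *		memset(node, 0, sizeof(*node));
 *		node->start = fb_base;
 *		node->size = fb_size;
 *
 *		return drm_mm_reserve_node(mm, node);
 *	}
 */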
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
	     hole = next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
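/*
 * Illustrative sketch (foo_alloc_high() and the SZ_* range are hypothetical
 * driver choices): a 64KiB-aligned allocation placed top-down in the upper
 * half of a 1GiB address space. @node is assumed to have been cleared to 0 by
 * the caller, as required above.
 *
 *	static int foo_alloc_high(struct drm_mm *mm, struct drm_mm_node *node,
 *				  u64 size)
 *	{
 *		return drm_mm_insert_node_in_range(mm, node, size,
 *						   SZ_64K, 0,
 *						   SZ_512M, SZ_1G,
 *						   DRM_MM_INSERT_HIGH);
 *	}
 */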
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node(&old->rb_hole_size,
				&new->rb_hole_size,
				&old->mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&old->mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply pick objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */
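/*
 * Illustrative sketch of the roster protocol described above (struct foo_obj,
 * its lru and eviction_link members, foo_evict() and foo_obj_from_node() are
 * hypothetical): scan candidates off an LRU, restore the roster in reverse
 * order, then evict what the scan selected.
 *
 *	struct drm_mm_scan scan;
 *	struct drm_mm_node *node;
 *	struct foo_obj *obj, *next;
 *	LIST_HEAD(eviction_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init_with_range(&scan, mm, size, alignment, color,
 *				    range_start, range_end,
 *				    DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &foo->lru, lru) {
 *		list_add(&obj->eviction_link, &eviction_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// list_add() prepends, so walking eviction_list forwards restores the
 *	// nodes in exactly the reverse order they were added.
 *	list_for_each_entry_safe(obj, next, &eviction_list, eviction_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->eviction_link);
 *	}
 *
 *	if (!found)
 *		return -ENOSPC;
 *
 *	// Evict the selected objects; freeing each node is O(1).
 *	list_for_each_entry_safe(obj, next, &eviction_list, eviction_link)
 *		foo_evict(obj);
 *
 *	// With a color_adjust callback, neighbours of the hole may have to go
 *	// as well.
 *	while ((node = drm_mm_scan_color_evict(&scan)))
 *		foo_evict(foo_obj_from_node(node));
 */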
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = hole_start + hole->hole_size;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);
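/*
 * To spell out the trick above: head_node is set up so that the generic hole
 * helpers see exactly one initial hole covering the whole managed range. With
 * head_node.start = start + size and head_node.size = -size (unsigned
 * wrap-around), the hole start computes as
 *
 *	__drm_mm_hole_node_start(&mm->head_node)
 *		= head_node.start + head_node.size
 *		= (start + size) + (0 - size)
 *		= start
 *
 * while the hole end is the start of the next node on the circular node_list,
 * which is head_node itself, i.e. start + size. The initial hole therefore
 * spans [start, start + size) without any special case in add_hole() or
 * rm_hole().
 */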
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
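/*
 * Illustrative sketch: dumping the allocator state from a debugfs show
 * callback (foo_mm_show() is hypothetical; drm_seq_file_printer() is one
 * possible sink for the &drm_printer, assuming it is available to the
 * driver):
 *
 *	static int foo_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_mm *mm = m->private;
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		drm_mm_print(mm, &p);
 *		return 0;
 *	}
 */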