xref: /openbmc/linux/drivers/gpu/drm/drm_mm.c (revision bdeeed09)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple. Originally the free list was
 * just an unordered stack of free regions; this version additionally tracks
 * holes in RB-trees sorted by size and by address (see below), which matters
 * if we expect heavy fragmentation. Aligned allocations can still see
 * improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which makes it easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted at any time after the allocator is initialized, which helps with
 * avoiding circular dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means
 * free space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since graphics
 * thrashing is a fairly steep performance cliff anyway, it is not a real
 * concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can
 * be used to implement sophisticated placement restrictions. The i915 DRM
 * driver uses this to implement guard pages between incompatible caching
 * domains in the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided,
 * as are some basic allocator dumpers for debugging. A usage sketch follows
 * this comment.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
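
/*
 * Usage sketch (not built): the typical life cycle of an embedded node. The
 * struct and function names below are hypothetical driver code, and locking
 * is omitted since drm_mm leaves that to the caller.
 */
#if 0
struct example_buffer {
	struct drm_mm_node node;	/* embedded, drm_mm allocates nothing */
};

static int example_buffer_alloc(struct drm_mm *mm,
				struct example_buffer *buf, u64 size)
{
	/* Bottom-up best-fit search across the whole managed range. */
	return drm_mm_insert_node_in_range(mm, &buf->node, size, 0, 0,
					   0, U64_MAX, DRM_MM_INSERT_BEST);
}

static void example_buffer_free(struct example_buffer *buf)
{
	/* The node can be reused immediately, no clearing required. */
	drm_mm_remove_node(&buf->node);
}
#endif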

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

/* Generates drm_mm_interval_tree_{insert,remove,iter_first,iter_next}(). */
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	/* If nothing overlaps [start, last], fall back to the head node. */
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

/* Insert @node into the interval tree, using @hole_node (the node it is
 * placed after) as a hint to avoid searching down from the root.
 */
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

/* Insert the node into @root sorted by @expr; note that the macro operates
 * on a variable called "node" in the enclosing scope.
 */
#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

/* Binary search for the smallest hole that is at least @size bytes. */
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *best = NULL;
	struct rb_node **link = &mm->holes_size.rb_node;

	while (*link) {
		struct rb_node *rb = *link;

		if (size <= rb_hole_size(rb)) {
			link = &rb->rb_left;
			best = rb;
		} else {
			link = &rb->rb_right;
		}
	}

	return rb_hole_size_to_node(best);
}

/* Find the hole that spans @addr in the address-sorted tree, if any. */
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct drm_mm_node *node = NULL;
	struct rb_node **link = &mm->holes_addr.rb_node;

	while (*link) {
		u64 hole_start;

		node = rb_hole_addr_to_node(*link);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			link = &node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			link = &node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

/* Pick the first hole to examine for the search strategy given by @mode. */
static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	if (RB_EMPTY_ROOT(&mm->holes_size))
		return NULL;

	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/* Advance to the next candidate hole in the order implied by @mode. */
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator
 * with preallocated objects which must be set up before the range allocator
 * can be set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
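
/*
 * Sketch (not built): taking over a firmware-initialized scanout buffer by
 * reserving its exact range before other allocations can land there. The
 * names are hypothetical; start and size would come from the firmware.
 */
#if 0
static int example_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 fb_start, u64 fb_size)
{
	/* All other fields of @node must already be cleared to 0. */
	node->start = fb_start;
	node->size = fb_size;
	node->color = 0;

	return drm_mm_reserve_node(mm, node); /* -ENOSPC if range is taken */
}
#endif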

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	/* Power-of-two alignment lets us mask instead of a 64-bit division. */
	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
	     hole = next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				/* Round down for top-down, up otherwise. */
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
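
/*
 * Sketch (not built): a range-restricted, aligned, top-down allocation. All
 * numbers are made up for illustration; SZ_* constants are assumed from
 * <linux/sizes.h>. Placing fixed-size objects top-down keeps the low range
 * free for allocations with tighter placement constraints.
 */
#if 0
static int example_alloc_high(struct drm_mm *mm, struct drm_mm_node *node)
{
	return drm_mm_insert_node_in_range(mm, node,
					   SZ_64K,		/* size */
					   SZ_64K,		/* alignment */
					   0,			/* color */
					   SZ_512M, SZ_1G,	/* range */
					   DRM_MM_INSERT_HIGH);
}
#endif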

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	/* Re-add the (possibly enlarged) hole that now follows the predecessor. */
	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node(&old->rb_hole_size,
				&new->rb_hole_size,
				&mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);
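
/*
 * Sketch (not built): since nodes are embedded, handing an allocation over
 * to a new backing structure goes through drm_mm_replace_node() instead of
 * a pointer swap. struct example_buffer is the hypothetical type from the
 * usage sketch near the top of this file.
 */
#if 0
static void example_buffer_migrate(struct example_buffer *old_buf,
				   struct example_buffer *new_buf)
{
	drm_mm_replace_node(&old_buf->node, &new_buf->node);
	/* old_buf no longer owns an allocation and may be freed. */
}
#endif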

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not the
 * most efficient to simply select all objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we
 * evict lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is
 * used in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color
 * adjustment (drm_mm_scan_color_evict()). Adding and removing an object is
 * O(1), and since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before
 * a scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of a complete eviction loop
 * follows this comment.
 */

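/*
 * Sketch (not built) of a full eviction loop. struct example_obj, its list
 * links and example_obj_evict() are hypothetical driver constructs; the
 * drm_mm_scan_* calls are the roster interface described above.
 */
#if 0
struct example_obj {
	struct drm_mm_node node;
	struct list_head lru_link;	/* on the driver's LRU */
	struct list_head evict_link;	/* temporary roster membership */
};

static int example_evict_something(struct drm_mm *mm, struct list_head *lru,
				   u64 size, u64 alignment)
{
	struct drm_mm_scan scan;
	struct example_obj *obj, *next;
	struct drm_mm_node *node;
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0,
				    0, U64_MAX, DRM_MM_INSERT_BEST);

	/* Feed candidates from the LRU until a suitable hole appears. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->evict_link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Restore the allocator state by removing every candidate in exactly
	 * the reverse order of addition (list_add() prepends, so forward
	 * iteration over evict_list does that); keep only blocks marked for
	 * eviction.
	 */
	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
		if (!drm_mm_scan_remove_block(&scan, &obj->node))
			list_del(&obj->evict_link);
	}

	if (!found)
		return -ENOSPC;

	/* Evict the selected objects; each removal is O(1). */
	list_for_each_entry_safe(obj, next, &evict_list, evict_link)
		example_obj_evict(obj);

	/* With a color_adjust callback, overlapping neighbours may remain. */
	while ((node = drm_mm_scan_color_evict(&scan)))
		example_obj_evict(container_of(node, struct example_obj, node));

	return 0;
}
#endif
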
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list
 * as they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's
 * at the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	/* Evict iff this node overlaps the hole found during the scan. */
	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking:
	 * the fake head node covers everything outside [start, start + size)
	 * (its size wraps around the u64 space), so the whole managed range
	 * shows up as the single hole following it.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
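
/*
 * Sketch (not built): dumping the allocator state to the kernel log, e.g.
 * from a hypothetical debugfs hook. drm_info_printer() is the stock printer
 * from <drm/drm_print.h>.
 */
#if 0
static void example_dump_mm(struct drm_mm *mm, struct device *dev)
{
	struct drm_printer p = drm_info_printer(dev);

	drm_mm_print(mm, &p);
}
#endif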
955