xref: /openbmc/linux/drivers/gpu/drm/drm_mm.c (revision 37744fee)
1 /**************************************************************************
2  *
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4  * Copyright 2016 Intel Corporation
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  *
28  **************************************************************************/
29 
30 /*
31  * Generic simple memory manager implementation. Intended to be used as a base
32  * class implementation for more advanced memory managers.
33  *
34  * Note that the algorithm used is quite simple and there might be substantial
35  * performance gains if a smarter free list is implemented. Currently it is
36  * just an unordered stack of free regions. This could easily be improved by
37  * using an RB-tree instead, at least if we expect heavy fragmentation.
38  *
39  * Aligned allocations can also see improvement.
40  *
41  * Authors:
42  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
43  */
44 
45 #include <linux/export.h>
46 #include <linux/interval_tree_generic.h>
47 #include <linux/seq_file.h>
48 #include <linux/slab.h>
49 #include <linux/stacktrace.h>
50 
51 #include <drm/drm_mm.h>
52 
53 /**
54  * DOC: Overview
55  *
56  * drm_mm provides a simple range allocator. Drivers are free to use the
57  * resource allocator from the Linux core if it suits them; the upside of
58  * drm_mm is that it lives in the DRM core, which makes it easier to extend
59  * for some of the crazier special-purpose needs of GPUs.
60  *
61  * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
62  * Drivers are free to embed either of them into their own suitable
63  * datastructures. drm_mm itself will not do any memory allocations of its own,
64  * so if drivers choose not to embed nodes they need to still allocate them
65  * themselves.
66  *
67  * The range allocator also supports reservation of preallocated blocks. This is
68  * useful for taking over initial mode setting configurations from the firmware,
69  * where an object needs to be created which exactly matches the firmware's
70  * scanout target. As long as the range is still free it can be inserted anytime
71  * after the allocator is initialized, which helps with avoiding looped
72  * dependencies in the driver load sequence.
73  *
74  * drm_mm maintains a stack of most recently freed holes, which of all
75  * simplistic datastructures seems to be a fairly decent approach to clustering
76  * allocations and avoiding too much fragmentation. This means free space
77  * searches are O(num_holes). Given all the fancy features drm_mm supports,
78  * something better would be fairly complex, and since gfx thrashing is a
79  * fairly steep cliff anyway it is not a real concern. Removing a node again is O(1).
80  *
81  * drm_mm supports a few features: Alignment and range restrictions can be
82  * supplied. Furthermore every &drm_mm_node has a color value (which is just an
83  * opaque unsigned long) which in conjunction with a driver callback can be used
84  * to implement sophisticated placement restrictions. The i915 DRM driver uses
85  * this to implement guard pages between incompatible caching domains in the
86  * graphics TT.
87  *
88  * Two behaviors are supported for searching and allocating: bottom-up and
89  * top-down. The default is bottom-up. Top-down allocation can be used if the
90  * memory area has different restrictions, or just to reduce fragmentation.
91  *
92  * Finally iteration helpers to walk all nodes and all holes are provided as are
93  * some basic allocator dumpers for debugging.
94  *
95  * Note that this range allocator is not thread-safe; drivers need to protect
96  * modifications with their own locking. The idea behind this is that for a full
97  * memory manager additional data needs to be protected anyway, hence internal
98  * locking would be fully redundant.
99  */
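
/*
 * A minimal usage sketch (an illustration added in editing, not part of the
 * original file): a hypothetical driver embeds &drm_mm and &drm_mm_node in
 * its own structures and wraps all modifications in its own lock, as the
 * note above requires. All "my_*" names are made up for the example.
 *
 *	struct my_vram_mgr {
 *		struct drm_mm mm;
 *		struct mutex lock;
 *	};
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must start out zeroed
 *	};
 *
 *	static void my_vram_mgr_init(struct my_vram_mgr *mgr, u64 start, u64 size)
 *	{
 *		mutex_init(&mgr->lock);
 *		drm_mm_init(&mgr->mm, start, size);
 *	}
 *
 *	static int my_buffer_alloc(struct my_vram_mgr *mgr,
 *				   struct my_buffer *buf, u64 size)
 *	{
 *		int ret;
 *
 *		mutex_lock(&mgr->lock);
 *		ret = drm_mm_insert_node(&mgr->mm, &buf->node, size);
 *		mutex_unlock(&mgr->lock);
 *		return ret;
 *	}
 *
 *	static void my_buffer_free(struct my_vram_mgr *mgr, struct my_buffer *buf)
 *	{
 *		mutex_lock(&mgr->lock);
 *		drm_mm_remove_node(&buf->node);
 *		mutex_unlock(&mgr->lock);
 *	}
 */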
100 
101 #ifdef CONFIG_DRM_DEBUG_MM
102 #include <linux/stackdepot.h>
103 
104 #define STACKDEPTH 32
105 #define BUFSZ 4096
106 
107 static noinline void save_stack(struct drm_mm_node *node)
108 {
109 	unsigned long entries[STACKDEPTH];
110 	unsigned int n;
111 
112 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
113 
114 	/* May be called under spinlock, so avoid sleeping */
115 	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
116 }
117 
118 static void show_leaks(struct drm_mm *mm)
119 {
120 	struct drm_mm_node *node;
121 	unsigned long *entries;
122 	unsigned int nr_entries;
123 	char *buf;
124 
125 	buf = kmalloc(BUFSZ, GFP_KERNEL);
126 	if (!buf)
127 		return;
128 
129 	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
130 		if (!node->stack) {
131 			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
132 				  node->start, node->size);
133 			continue;
134 		}
135 
136 		nr_entries = stack_depot_fetch(node->stack, &entries);
137 		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
138 		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
139 			  node->start, node->size, buf);
140 	}
141 
142 	kfree(buf);
143 }
144 
145 #undef STACKDEPTH
146 #undef BUFSZ
147 #else
148 static void save_stack(struct drm_mm_node *node) { }
149 static void show_leaks(struct drm_mm *mm) { }
150 #endif
151 
152 #define START(node) ((node)->start)
153 #define LAST(node)  ((node)->start + (node)->size - 1)
154 
155 INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
156 		     u64, __subtree_last,
157 		     START, LAST, static inline, drm_mm_interval_tree)
158 
159 struct drm_mm_node *
160 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
161 {
162 	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
163 					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
164 }
165 EXPORT_SYMBOL(__drm_mm_interval_first);
166 
167 static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
168 					  struct drm_mm_node *node)
169 {
170 	struct drm_mm *mm = hole_node->mm;
171 	struct rb_node **link, *rb;
172 	struct drm_mm_node *parent;
173 	bool leftmost;
174 
175 	node->__subtree_last = LAST(node);
176 
177 	if (drm_mm_node_allocated(hole_node)) {
178 		rb = &hole_node->rb;
179 		while (rb) {
180 			parent = rb_entry(rb, struct drm_mm_node, rb);
181 			if (parent->__subtree_last >= node->__subtree_last)
182 				break;
183 
184 			parent->__subtree_last = node->__subtree_last;
185 			rb = rb_parent(rb);
186 		}
187 
188 		rb = &hole_node->rb;
189 		link = &hole_node->rb.rb_right;
190 		leftmost = false;
191 	} else {
192 		rb = NULL;
193 		link = &mm->interval_tree.rb_root.rb_node;
194 		leftmost = true;
195 	}
196 
197 	while (*link) {
198 		rb = *link;
199 		parent = rb_entry(rb, struct drm_mm_node, rb);
200 		if (parent->__subtree_last < node->__subtree_last)
201 			parent->__subtree_last = node->__subtree_last;
202 		if (node->start < parent->start) {
203 			link = &parent->rb.rb_left;
204 		} else {
205 			link = &parent->rb.rb_right;
206 			leftmost = false;
207 		}
208 	}
209 
210 	rb_link_node(&node->rb, rb, link);
211 	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
212 				   &drm_mm_interval_tree_augment);
213 }
214 
215 #define RB_INSERT(root, member, expr) do { \
216 	struct rb_node **link = &root.rb_node, *rb = NULL; \
217 	u64 x = expr(node); \
218 	while (*link) { \
219 		rb = *link; \
220 		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
221 			link = &rb->rb_left; \
222 		else \
223 			link = &rb->rb_right; \
224 	} \
225 	rb_link_node(&node->member, rb, link); \
226 	rb_insert_color(&node->member, &root); \
227 } while (0)
228 
229 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
230 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
231 
232 static u64 rb_to_hole_size(struct rb_node *rb)
233 {
234 	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
235 }
236 
237 static void insert_hole_size(struct rb_root_cached *root,
238 			     struct drm_mm_node *node)
239 {
240 	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
241 	u64 x = node->hole_size;
242 	bool first = true;
243 
244 	while (*link) {
245 		rb = *link;
246 		if (x > rb_to_hole_size(rb)) {
247 			link = &rb->rb_left;
248 		} else {
249 			link = &rb->rb_right;
250 			first = false;
251 		}
252 	}
253 
254 	rb_link_node(&node->rb_hole_size, rb, link);
255 	rb_insert_color_cached(&node->rb_hole_size, root, first);
256 }
257 
258 static void add_hole(struct drm_mm_node *node)
259 {
260 	struct drm_mm *mm = node->mm;
261 
262 	node->hole_size =
263 		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
264 	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
265 
266 	insert_hole_size(&mm->holes_size, node);
267 	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
268 
269 	list_add(&node->hole_stack, &mm->hole_stack);
270 }
271 
272 static void rm_hole(struct drm_mm_node *node)
273 {
274 	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
275 
276 	list_del(&node->hole_stack);
277 	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
278 	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
279 	node->hole_size = 0;
280 
281 	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
282 }
283 
284 static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
285 {
286 	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
287 }
288 
289 static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
290 {
291 	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
292 }
293 
294 static inline u64 rb_hole_size(struct rb_node *rb)
295 {
296 	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
297 }
298 
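/*
 * The size tree is kept in descending hole-size order (see insert_hole_size(),
 * which sends larger holes to the left), so walking right moves towards
 * smaller holes: best_hole() therefore returns the smallest hole that can
 * still fit @size, i.e. a best-fit search.
 */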
299 static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
300 {
301 	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
302 	struct drm_mm_node *best = NULL;
303 
304 	do {
305 		struct drm_mm_node *node =
306 			rb_entry(rb, struct drm_mm_node, rb_hole_size);
307 
308 		if (size <= node->hole_size) {
309 			best = node;
310 			rb = rb->rb_right;
311 		} else {
312 			rb = rb->rb_left;
313 		}
314 	} while (rb);
315 
316 	return best;
317 }
318 
319 static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
320 {
321 	struct rb_node *rb = mm->holes_addr.rb_node;
322 	struct drm_mm_node *node = NULL;
323 
324 	while (rb) {
325 		u64 hole_start;
326 
327 		node = rb_hole_addr_to_node(rb);
328 		hole_start = __drm_mm_hole_node_start(node);
329 
330 		if (addr < hole_start)
331 			rb = node->rb_hole_addr.rb_left;
332 		else if (addr > hole_start + node->hole_size)
333 			rb = node->rb_hole_addr.rb_right;
334 		else
335 			break;
336 	}
337 
338 	return node;
339 }
340 
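/*
 * Pick the first candidate hole for the requested insertion mode: BEST starts
 * from the smallest hole that is still large enough, LOW/HIGH start from the
 * hole at (or nearest to) the lower/upper end of the allowed range, and EVICT
 * starts from the most recently freed hole on the hole_stack.
 */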
341 static struct drm_mm_node *
342 first_hole(struct drm_mm *mm,
343 	   u64 start, u64 end, u64 size,
344 	   enum drm_mm_insert_mode mode)
345 {
346 	switch (mode) {
347 	default:
348 	case DRM_MM_INSERT_BEST:
349 		return best_hole(mm, size);
350 
351 	case DRM_MM_INSERT_LOW:
352 		return find_hole(mm, start);
353 
354 	case DRM_MM_INSERT_HIGH:
355 		return find_hole(mm, end);
356 
357 	case DRM_MM_INSERT_EVICT:
358 		return list_first_entry_or_null(&mm->hole_stack,
359 						struct drm_mm_node,
360 						hole_stack);
361 	}
362 }
363 
364 static struct drm_mm_node *
365 next_hole(struct drm_mm *mm,
366 	  struct drm_mm_node *node,
367 	  enum drm_mm_insert_mode mode)
368 {
369 	switch (mode) {
370 	default:
371 	case DRM_MM_INSERT_BEST:
372 		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
373 
374 	case DRM_MM_INSERT_LOW:
375 		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
376 
377 	case DRM_MM_INSERT_HIGH:
378 		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
379 
380 	case DRM_MM_INSERT_EVICT:
381 		node = list_next_entry(node, hole_stack);
382 		return &node->hole_stack == &mm->hole_stack ? NULL : node;
383 	}
384 }
385 
386 /**
387  * drm_mm_reserve_node - insert a pre-initialized node
388  * @mm: drm_mm allocator to insert @node into
389  * @node: drm_mm_node to insert
390  *
391  * This function inserts an already set-up &drm_mm_node into the allocator,
392  * meaning that start, size and color must be set by the caller. All other
393  * fields must be cleared to 0. This is useful to initialize the allocator with
394  * preallocated objects which must be set-up before the range allocator can be
395  * set-up, e.g. when taking over a firmware framebuffer.
396  *
397  * Returns:
398  * 0 on success, -ENOSPC if there's no hole where @node is.
399  */
400 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
401 {
402 	struct drm_mm_node *hole;
403 	u64 hole_start, hole_end;
404 	u64 adj_start, adj_end;
405 	u64 end;
406 
407 	end = node->start + node->size;
408 	if (unlikely(end <= node->start))
409 		return -ENOSPC;
410 
411 	/* Find the relevant hole to add our node to */
412 	hole = find_hole(mm, node->start);
413 	if (!hole)
414 		return -ENOSPC;
415 
416 	adj_start = hole_start = __drm_mm_hole_node_start(hole);
417 	adj_end = hole_end = hole_start + hole->hole_size;
418 
419 	if (mm->color_adjust)
420 		mm->color_adjust(hole, node->color, &adj_start, &adj_end);
421 
422 	if (adj_start > node->start || adj_end < end)
423 		return -ENOSPC;
424 
425 	node->mm = mm;
426 
427 	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
428 	list_add(&node->node_list, &hole->node_list);
429 	drm_mm_interval_tree_add_node(hole, node);
430 	node->hole_size = 0;
431 
432 	rm_hole(hole);
433 	if (node->start > hole_start)
434 		add_hole(hole);
435 	if (end < hole_end)
436 		add_hole(node);
437 
438 	save_stack(node);
439 	return 0;
440 }
441 EXPORT_SYMBOL(drm_mm_reserve_node);
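
/*
 * Illustration (added in editing, not part of the original file): reserving
 * the range a firmware-initialized framebuffer already occupies, before any
 * other allocations are made from the same drm_mm. The my_* name and the
 * fb_offset/fb_size parameters are assumptions for the example.
 *
 *	static int my_reserve_firmware_fb(struct drm_mm *mm,
 *					  struct drm_mm_node *node,
 *					  u64 fb_offset, u64 fb_size)
 *	{
 *		// Only start, size and color may be non-zero on entry.
 *		memset(node, 0, sizeof(*node));
 *		node->start = fb_offset;
 *		node->size = fb_size;
 *
 *		return drm_mm_reserve_node(mm, node);
 *	}
 */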
442 
443 static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
444 {
445 	return rb ? rb_to_hole_size(rb) : 0;
446 }
447 
448 /**
449  * drm_mm_insert_node_in_range - ranged search for space and insert @node
450  * @mm: drm_mm to allocate from
451  * @node: preallocate node to insert
452  * @size: size of the allocation
453  * @alignment: alignment of the allocation
454  * @color: opaque tag value to use for this node
455  * @range_start: start of the allowed range for this node
456  * @range_end: end of the allowed range for this node
457  * @mode: fine-tune the allocation search and placement
458  *
459  * The preallocated @node must be cleared to 0.
460  *
461  * Returns:
462  * 0 on success, -ENOSPC if there's no suitable hole.
463  */
464 int drm_mm_insert_node_in_range(struct drm_mm * const mm,
465 				struct drm_mm_node * const node,
466 				u64 size, u64 alignment,
467 				unsigned long color,
468 				u64 range_start, u64 range_end,
469 				enum drm_mm_insert_mode mode)
470 {
471 	struct drm_mm_node *hole;
472 	u64 remainder_mask;
473 	bool once;
474 
475 	DRM_MM_BUG_ON(range_start > range_end);
476 
477 	if (unlikely(size == 0 || range_end - range_start < size))
478 		return -ENOSPC;
479 
480 	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
481 		return -ENOSPC;
482 
483 	if (alignment <= 1)
484 		alignment = 0;
485 
486 	once = mode & DRM_MM_INSERT_ONCE;
487 	mode &= ~DRM_MM_INSERT_ONCE;
488 
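	/*
	 * For power-of-two alignments the remainder can be computed with a
	 * cheap mask inside the search loop; remainder_mask == 0 selects the
	 * div64_u64_rem() slow path for other alignments.
	 */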
489 	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
490 	for (hole = first_hole(mm, range_start, range_end, size, mode);
491 	     hole;
492 	     hole = once ? NULL : next_hole(mm, hole, mode)) {
493 		u64 hole_start = __drm_mm_hole_node_start(hole);
494 		u64 hole_end = hole_start + hole->hole_size;
495 		u64 adj_start, adj_end;
496 		u64 col_start, col_end;
497 
498 		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
499 			break;
500 
501 		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
502 			break;
503 
504 		col_start = hole_start;
505 		col_end = hole_end;
506 		if (mm->color_adjust)
507 			mm->color_adjust(hole, color, &col_start, &col_end);
508 
509 		adj_start = max(col_start, range_start);
510 		adj_end = min(col_end, range_end);
511 
512 		if (adj_end <= adj_start || adj_end - adj_start < size)
513 			continue;
514 
515 		if (mode == DRM_MM_INSERT_HIGH)
516 			adj_start = adj_end - size;
517 
518 		if (alignment) {
519 			u64 rem;
520 
521 			if (likely(remainder_mask))
522 				rem = adj_start & remainder_mask;
523 			else
524 				div64_u64_rem(adj_start, alignment, &rem);
525 			if (rem) {
526 				adj_start -= rem;
527 				if (mode != DRM_MM_INSERT_HIGH)
528 					adj_start += alignment;
529 
530 				if (adj_start < max(col_start, range_start) ||
531 				    min(col_end, range_end) - adj_start < size)
532 					continue;
533 
534 				if (adj_end <= adj_start ||
535 				    adj_end - adj_start < size)
536 					continue;
537 			}
538 		}
539 
540 		node->mm = mm;
541 		node->size = size;
542 		node->start = adj_start;
543 		node->color = color;
544 		node->hole_size = 0;
545 
546 		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
547 		list_add(&node->node_list, &hole->node_list);
548 		drm_mm_interval_tree_add_node(hole, node);
549 
550 		rm_hole(hole);
551 		if (adj_start > hole_start)
552 			add_hole(hole);
553 		if (adj_start + size < hole_end)
554 			add_hole(node);
555 
556 		save_stack(node);
557 		return 0;
558 	}
559 
560 	return -ENOSPC;
561 }
562 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
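
/*
 * Illustration (added in editing, not part of the original file): a top-down,
 * 64KiB-aligned placement restricted to the upper half of a managed range.
 * The my_* name and the range parameters are assumptions for the example.
 *
 *	static int my_place_high(struct drm_mm *mm, struct drm_mm_node *node,
 *				 u64 size, u64 range_start, u64 range_end)
 *	{
 *		return drm_mm_insert_node_in_range(mm, node, size, SZ_64K, 0,
 *						   (range_start + range_end) / 2,
 *						   range_end,
 *						   DRM_MM_INSERT_HIGH);
 *	}
 */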
563 
564 static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
565 {
566 	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
567 }
568 
569 /**
570  * drm_mm_remove_node - Remove a memory node from the allocator.
571  * @node: drm_mm_node to remove
572  *
573  * This just removes a node from its drm_mm allocator. The node does not need to
574  * be cleared again before it can be re-inserted into this or any other drm_mm
575  * allocator. It is a bug to call this function on an unallocated node.
576  */
577 void drm_mm_remove_node(struct drm_mm_node *node)
578 {
579 	struct drm_mm *mm = node->mm;
580 	struct drm_mm_node *prev_node;
581 
582 	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
583 	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
584 
585 	prev_node = list_prev_entry(node, node_list);
586 
587 	if (drm_mm_hole_follows(node))
588 		rm_hole(node);
589 
590 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
591 	list_del(&node->node_list);
592 
593 	if (drm_mm_hole_follows(prev_node))
594 		rm_hole(prev_node);
595 	add_hole(prev_node);
596 
597 	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
598 }
599 EXPORT_SYMBOL(drm_mm_remove_node);
600 
601 /**
602  * drm_mm_replace_node - move an allocation from @old to @new
603  * @old: drm_mm_node to remove from the allocator
604  * @new: drm_mm_node which should inherit @old's allocation
605  *
606  * This is useful for when drivers embed the drm_mm_node structure and hence
607  * can't move allocations by reassigning pointers. It's a combination of remove
608  * and insert with the guarantee that the allocation start will match.
609  */
610 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
611 {
612 	struct drm_mm *mm = old->mm;
613 
614 	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
615 
616 	*new = *old;
617 
618 	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
619 	list_replace(&old->node_list, &new->node_list);
620 	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
621 
622 	if (drm_mm_hole_follows(old)) {
623 		list_replace(&old->hole_stack, &new->hole_stack);
624 		rb_replace_node_cached(&old->rb_hole_size,
625 				       &new->rb_hole_size,
626 				       &mm->holes_size);
627 		rb_replace_node(&old->rb_hole_addr,
628 				&new->rb_hole_addr,
629 				&mm->holes_addr);
630 	}
631 
632 	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
633 }
634 EXPORT_SYMBOL(drm_mm_replace_node);
635 
636 /**
637  * DOC: lru scan roster
638  *
639  * Very often GPUs need to have continuous allocations for a given object. When
640  * evicting objects to make space for a new one it is therefore not the most
641  * efficient strategy to simply select objects from the tail of an LRU
642  * until there's a suitable hole: especially for big objects or nodes that
643  * otherwise have special allocation constraints there's a good chance we evict
644  * lots of (smaller) objects unnecessarily.
645  *
646  * The DRM range allocator supports this use-case through the scanning
647  * interfaces. First a scan operation needs to be initialized with
648  * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
649  * objects to the roster, probably by walking an LRU list, but this can be
650  * freely implemented. Eviction candidates are added using
651  * drm_mm_scan_add_block() until a suitable hole is found or there are no
652  * further evictable objects. Eviction roster metadata is tracked in &struct
653  * drm_mm_scan.
654  *
655  * The driver must walk through all objects again in exactly the reverse
656  * order to restore the allocator state. Note that while the allocator is used
657  * in the scan mode no other operation is allowed.
658  *
659  * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
660  * reported true) in the scan, and any overlapping nodes after color adjustment
661  * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
662  * since freeing a node is also O(1) the overall complexity is
663  * O(scanned_objects). So, like the free stack which needs to be walked before
664  * a scan operation even begins, this is linear in the number of objects. It
665  * doesn't seem to hurt too badly.
666  */
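
/*
 * Illustration (added in editing, not part of the original file): a typical
 * eviction loop built on the scan interface, roughly the shape drivers such
 * as i915 use. The my_obj type, its lists and the surrounding locking are
 * assumptions for the example; color handling is left out (color 0, no
 * color_adjust), otherwise drm_mm_scan_color_evict() would be needed as a
 * final step.
 *
 *	struct my_obj {
 *		struct drm_mm_node node;
 *		struct list_head lru_link;	// on the driver's LRU
 *		struct list_head evict_link;	// temporary scan membership
 *	};
 *
 *	static int my_evict_and_insert(struct drm_mm *mm, struct list_head *lru,
 *				       struct drm_mm_node *node, u64 size)
 *	{
 *		struct my_obj *obj, *next;
 *		struct drm_mm_scan scan;
 *		LIST_HEAD(evict_list);
 *		bool found = false;
 *
 *		drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *
 *		// Build the roster from the LRU until a hole is found.
 *		list_for_each_entry(obj, lru, lru_link) {
 *			list_add(&obj->evict_link, &evict_list);
 *			if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *				found = true;
 *				break;
 *			}
 *		}
 *
 *		// Walk back in reverse order; keep only the selected nodes.
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *			if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *				list_del(&obj->evict_link);
 *		}
 *
 *		if (!found)
 *			return -ENOSPC;
 *
 *		// Actually evict: unbinding/freeing is driver specific.
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *			drm_mm_remove_node(&obj->node);
 *
 *		return drm_mm_insert_node_generic(mm, node, size, 0, 0,
 *						  DRM_MM_INSERT_EVICT);
 *	}
 */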
667 
668 /**
669  * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
670  * @scan: scan state
671  * @mm: drm_mm to scan
672  * @size: size of the allocation
673  * @alignment: alignment of the allocation
674  * @color: opaque tag value to use for the allocation
675  * @start: start of the allowed range for the allocation
676  * @end: end of the allowed range for the allocation
677  * @mode: fine-tune the allocation search and placement
678  *
679  * This simply sets up the scanning routines with the parameters for the desired
680  * hole.
681  *
682  * Warning:
683  * As long as the scan list is non-empty, no other operations than
684  * adding/removing nodes to/from the scan list are allowed.
685  */
686 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
687 				 struct drm_mm *mm,
688 				 u64 size,
689 				 u64 alignment,
690 				 unsigned long color,
691 				 u64 start,
692 				 u64 end,
693 				 enum drm_mm_insert_mode mode)
694 {
695 	DRM_MM_BUG_ON(start >= end);
696 	DRM_MM_BUG_ON(!size || size > end - start);
697 	DRM_MM_BUG_ON(mm->scan_active);
698 
699 	scan->mm = mm;
700 
701 	if (alignment <= 1)
702 		alignment = 0;
703 
704 	scan->color = color;
705 	scan->alignment = alignment;
706 	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
707 	scan->size = size;
708 	scan->mode = mode;
709 
710 	DRM_MM_BUG_ON(end <= start);
711 	scan->range_start = start;
712 	scan->range_end = end;
713 
714 	scan->hit_start = U64_MAX;
715 	scan->hit_end = 0;
716 }
717 EXPORT_SYMBOL(drm_mm_scan_init_with_range);
718 
719 /**
720  * drm_mm_scan_add_block - add a node to the scan list
721  * @scan: the active drm_mm scanner
722  * @node: drm_mm_node to add
723  *
724  * Add a node to the scan list that might be freed to make space for the desired
725  * hole.
726  *
727  * Returns:
728  * True if a hole has been found, false otherwise.
729  */
730 bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
731 			   struct drm_mm_node *node)
732 {
733 	struct drm_mm *mm = scan->mm;
734 	struct drm_mm_node *hole;
735 	u64 hole_start, hole_end;
736 	u64 col_start, col_end;
737 	u64 adj_start, adj_end;
738 
739 	DRM_MM_BUG_ON(node->mm != mm);
740 	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
741 	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
742 	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
743 	mm->scan_active++;
744 
745 	/* Remove this block from the node_list so that we enlarge the hole
746 	 * (distance between the end of our previous node and the start of
747 	 * our next), without poisoning the link so that we can restore it
748 	 * later in drm_mm_scan_remove_block().
749 	 */
750 	hole = list_prev_entry(node, node_list);
751 	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
752 	__list_del_entry(&node->node_list);
753 
754 	hole_start = __drm_mm_hole_node_start(hole);
755 	hole_end = __drm_mm_hole_node_end(hole);
756 
757 	col_start = hole_start;
758 	col_end = hole_end;
759 	if (mm->color_adjust)
760 		mm->color_adjust(hole, scan->color, &col_start, &col_end);
761 
762 	adj_start = max(col_start, scan->range_start);
763 	adj_end = min(col_end, scan->range_end);
764 	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
765 		return false;
766 
767 	if (scan->mode == DRM_MM_INSERT_HIGH)
768 		adj_start = adj_end - scan->size;
769 
770 	if (scan->alignment) {
771 		u64 rem;
772 
773 		if (likely(scan->remainder_mask))
774 			rem = adj_start & scan->remainder_mask;
775 		else
776 			div64_u64_rem(adj_start, scan->alignment, &rem);
777 		if (rem) {
778 			adj_start -= rem;
779 			if (scan->mode != DRM_MM_INSERT_HIGH)
780 				adj_start += scan->alignment;
781 			if (adj_start < max(col_start, scan->range_start) ||
782 			    min(col_end, scan->range_end) - adj_start < scan->size)
783 				return false;
784 
785 			if (adj_end <= adj_start ||
786 			    adj_end - adj_start < scan->size)
787 				return false;
788 		}
789 	}
790 
791 	scan->hit_start = adj_start;
792 	scan->hit_end = adj_start + scan->size;
793 
794 	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
795 	DRM_MM_BUG_ON(scan->hit_start < hole_start);
796 	DRM_MM_BUG_ON(scan->hit_end > hole_end);
797 
798 	return true;
799 }
800 EXPORT_SYMBOL(drm_mm_scan_add_block);
801 
802 /**
803  * drm_mm_scan_remove_block - remove a node from the scan list
804  * @scan: the active drm_mm scanner
805  * @node: drm_mm_node to remove
806  *
807  * Nodes **must** be removed from the scan list in exactly the reverse order in
808  * which they have been added (e.g. using list_add() as they are added and then
809  * list_for_each() over that eviction list to remove), otherwise the internal
810  * state of the memory manager will be corrupted.
811  *
812  * When the scan list is empty, the selected memory nodes can be freed. An
813  * immediately following drm_mm_insert_node_in_range() or
814  * drm_mm_insert_node_generic() with DRM_MM_INSERT_EVICT will then return the
815  * just freed block (because it is at the top of the hole_stack list).
816  *
817  * Returns:
818  * True if this block should be evicted, false otherwise. Will always
819  * return false when no hole has been found.
820  */
821 bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
822 			      struct drm_mm_node *node)
823 {
824 	struct drm_mm_node *prev_node;
825 
826 	DRM_MM_BUG_ON(node->mm != scan->mm);
827 	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
828 	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
829 
830 	DRM_MM_BUG_ON(!node->mm->scan_active);
831 	node->mm->scan_active--;
832 
833 	/* During drm_mm_scan_add_block() we decoupled this node leaving
834 	 * its pointers intact. Now that the caller is walking back along
835 	 * the eviction list we can restore this block into its rightful
836 	 * place on the full node_list. To confirm that the caller is walking
837 	 * backwards correctly we check that prev_node->next == node->next,
838 	 * i.e. both believe the same node should be on the other side of the
839 	 * hole.
840 	 */
841 	prev_node = list_prev_entry(node, node_list);
842 	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
843 		      list_next_entry(node, node_list));
844 	list_add(&node->node_list, &prev_node->node_list);
845 
846 	return (node->start + node->size > scan->hit_start &&
847 		node->start < scan->hit_end);
848 }
849 EXPORT_SYMBOL(drm_mm_scan_remove_block);
850 
851 /**
852  * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
853  * @scan: drm_mm scan with target hole
854  *
855  * After completing an eviction scan and removing the selected nodes, we may
856  * need to remove a few more nodes from either side of the target hole if
857  * mm.color_adjust is being used.
858  *
859  * Returns:
860  * A node to evict, or NULL if there are no overlapping nodes.
861  */
862 struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
863 {
864 	struct drm_mm *mm = scan->mm;
865 	struct drm_mm_node *hole;
866 	u64 hole_start, hole_end;
867 
868 	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
869 
870 	if (!mm->color_adjust)
871 		return NULL;
872 
873 	/*
874 	 * The hole found during scanning should ideally be the first element
875 	 * in the hole_stack list, but due to side-effects in the driver it
876 	 * may not be.
877 	 */
878 	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
879 		hole_start = __drm_mm_hole_node_start(hole);
880 		hole_end = hole_start + hole->hole_size;
881 
882 		if (hole_start <= scan->hit_start &&
883 		    hole_end >= scan->hit_end)
884 			break;
885 	}
886 
887 	/* We should only be called after we found the hole previously */
888 	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
889 	if (unlikely(&hole->hole_stack == &mm->hole_stack))
890 		return NULL;
891 
892 	DRM_MM_BUG_ON(hole_start > scan->hit_start);
893 	DRM_MM_BUG_ON(hole_end < scan->hit_end);
894 
895 	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
896 	if (hole_start > scan->hit_start)
897 		return hole;
898 	if (hole_end < scan->hit_end)
899 		return list_next_entry(hole, node_list);
900 
901 	return NULL;
902 }
903 EXPORT_SYMBOL(drm_mm_scan_color_evict);
904 
905 /**
906  * drm_mm_init - initialize a drm-mm allocator
907  * @mm: the drm_mm structure to initialize
908  * @start: start of the range managed by @mm
909  * @size: size of the range managed by @mm
910  *
911  * Note that @mm must be cleared to 0 before calling this function.
912  */
913 void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
914 {
915 	DRM_MM_BUG_ON(start + size <= start);
916 
917 	mm->color_adjust = NULL;
918 
919 	INIT_LIST_HEAD(&mm->hole_stack);
920 	mm->interval_tree = RB_ROOT_CACHED;
921 	mm->holes_size = RB_ROOT_CACHED;
922 	mm->holes_addr = RB_ROOT;
923 
924 	/*
	 * Clever trick to avoid a special case in the free hole tracking: the
	 * head node nominally starts at the end of the managed range and its
	 * size wraps back around, so the hole that follows it is exactly
	 * [start, start + size) and the circular node_list needs no empty-list
	 * special case.
	 */
925 	INIT_LIST_HEAD(&mm->head_node.node_list);
926 	mm->head_node.flags = 0;
927 	mm->head_node.mm = mm;
928 	mm->head_node.start = start + size;
929 	mm->head_node.size = -size;
930 	add_hole(&mm->head_node);
931 
932 	mm->scan_active = 0;
933 }
934 EXPORT_SYMBOL(drm_mm_init);
935 
936 /**
937  * drm_mm_takedown - clean up a drm_mm allocator
938  * @mm: drm_mm allocator to clean up
939  *
940  * Note that it is a bug to call this function on an allocator which is not
941  * clean.
942  */
943 void drm_mm_takedown(struct drm_mm *mm)
944 {
945 	if (WARN(!drm_mm_clean(mm),
946 		 "Memory manager not clean during takedown.\n"))
947 		show_leaks(mm);
948 }
949 EXPORT_SYMBOL(drm_mm_takedown);
950 
951 static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
952 {
953 	u64 start, size;
954 
955 	size = entry->hole_size;
956 	if (size) {
957 		start = drm_mm_hole_node_start(entry);
958 		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
959 			   start, start + size, size);
960 	}
961 
962 	return size;
963 }
964 /**
965  * drm_mm_print - print allocator state
966  * @mm: drm_mm allocator to print
967  * @p: DRM printer to use
968  */
969 void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
970 {
971 	const struct drm_mm_node *entry;
972 	u64 total_used = 0, total_free = 0, total = 0;
973 
974 	total_free += drm_mm_dump_hole(p, &mm->head_node);
975 
976 	drm_mm_for_each_node(entry, mm) {
977 		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
978 			   entry->start + entry->size, entry->size);
979 		total_used += entry->size;
980 		total_free += drm_mm_dump_hole(p, entry);
981 	}
982 	total = total_free + total_used;
983 
984 	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
985 		   total_used, total_free);
986 }
987 EXPORT_SYMBOL(drm_mm_print);
988