1 /**************************************************************************
2  *
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  *
27  **************************************************************************/
28 
29 /*
30  * Generic simple memory manager implementation. Intended to be used as a base
31  * class implementation for more advanced memory managers.
32  *
33  * Note that the algorithm used is quite simple and there might be substantial
34  * performance gains if a smarter free list is implemented. Currently it is just
35  * an unordered stack of free regions, which could easily be improved by using
36  * an RB-tree instead, at least if we expect heavy fragmentation.
37  *
38  * Aligned allocations can also see improvement.
39  *
40  * Authors:
41  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42  */
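
/*
 * Implementation notes (summary of the code below): allocated ranges are
 * tracked by struct drm_mm_node objects embedded in the caller's own
 * structures.  All nodes hang off an address-ordered list anchored at
 * mm->head_node, and each free gap is represented implicitly by the node
 * that precedes it (node->hole_follows); such nodes are additionally linked
 * on mm->hole_stack so free space can be scanned quickly.
 */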
43 
44 #include <drm/drmP.h>
45 #include <drm/drm_mm.h>
46 #include <linux/slab.h>
47 #include <linux/seq_file.h>
48 #include <linux/export.h>
49 
50 #define MM_UNUSED_TARGET 4
51 
52 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
53 						unsigned long size,
54 						unsigned alignment,
55 						unsigned long color,
56 						enum drm_mm_search_flags flags);
57 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
58 						unsigned long size,
59 						unsigned alignment,
60 						unsigned long color,
61 						unsigned long start,
62 						unsigned long end,
63 						enum drm_mm_search_flags flags);
64 
65 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
66 				 struct drm_mm_node *node,
67 				 unsigned long size, unsigned alignment,
68 				 unsigned long color)
69 {
70 	struct drm_mm *mm = hole_node->mm;
71 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
72 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
73 	unsigned long adj_start = hole_start;
74 	unsigned long adj_end = hole_end;
75 
76 	BUG_ON(node->allocated);
77 
78 	if (mm->color_adjust)
79 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
80 
81 	if (alignment) {
82 		unsigned tmp = adj_start % alignment;
83 		if (tmp)
84 			adj_start += alignment - tmp;
85 	}
86 
87 	if (adj_start == hole_start) {
88 		hole_node->hole_follows = 0;
89 		list_del(&hole_node->hole_stack);
90 	}
91 
92 	node->start = adj_start;
93 	node->size = size;
94 	node->mm = mm;
95 	node->color = color;
96 	node->allocated = 1;
97 
98 	INIT_LIST_HEAD(&node->hole_stack);
99 	list_add(&node->node_list, &hole_node->node_list);
100 
101 	BUG_ON(node->start + node->size > adj_end);
102 
103 	node->hole_follows = 0;
104 	if (__drm_mm_hole_node_start(node) < hole_end) {
105 		list_add(&node->hole_stack, &mm->hole_stack);
106 		node->hole_follows = 1;
107 	}
108 }
109 
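/**
 * drm_mm_reserve_node - insert a pre-initialized node into the allocator
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node with start and size already filled in by the caller
 *
 * The node is linked in at exactly node->start, provided a free hole entirely
 * covers [node->start, node->start + node->size).  Returns -ENOSPC when that
 * range is not available.
 */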
110 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
111 {
112 	struct drm_mm_node *hole;
113 	unsigned long end = node->start + node->size;
114 	unsigned long hole_start;
115 	unsigned long hole_end;
116 
117 	BUG_ON(node == NULL);
118 
119 	/* Find the relevant hole to add our node to */
120 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
121 		if (hole_start > node->start || hole_end < end)
122 			continue;
123 
124 		node->mm = mm;
125 		node->allocated = 1;
126 
127 		INIT_LIST_HEAD(&node->hole_stack);
128 		list_add(&node->node_list, &hole->node_list);
129 
130 		if (node->start == hole_start) {
131 			hole->hole_follows = 0;
132 			list_del_init(&hole->hole_stack);
133 		}
134 
135 		node->hole_follows = 0;
136 		if (end != hole_end) {
137 			list_add(&node->hole_stack, &mm->hole_stack);
138 			node->hole_follows = 1;
139 		}
140 
141 		return 0;
142 	}
143 
144 	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
145 	     node->start, node->size);
146 	return -ENOSPC;
147 }
148 EXPORT_SYMBOL(drm_mm_reserve_node);
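
/*
 * Minimal usage sketch for drm_mm_reserve_node() (illustrative only, not part
 * of the original file; identifiers are hypothetical): the caller owns the
 * node and pre-fills the exact range it wants to claim, e.g. memory already
 * scanned out by firmware at boot.
 */
#if 0	/* example only, not compiled */
static int example_reserve_preallocated(struct drm_mm *mm,
					struct drm_mm_node *node,
					unsigned long start,
					unsigned long size)
{
	memset(node, 0, sizeof(*node));		/* node must start out cleared */
	node->start = start;			/* range that is already in use */
	node->size = size;

	return drm_mm_reserve_node(mm, node);	/* -ENOSPC if range not free */
}
#endif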
149 
150 /**
151  * Search for free space and insert a preallocated memory node. Returns
152  * -ENOSPC if no suitable free area is available. The preallocated memory node
153  * must be zeroed by the caller before it is passed in.
154  */
155 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
156 			       unsigned long size, unsigned alignment,
157 			       unsigned long color,
158 			       enum drm_mm_search_flags flags)
159 {
160 	struct drm_mm_node *hole_node;
161 
162 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
163 					       color, flags);
164 	if (!hole_node)
165 		return -ENOSPC;
166 
167 	drm_mm_insert_helper(hole_node, node, size, alignment, color);
168 	return 0;
169 }
170 EXPORT_SYMBOL(drm_mm_insert_node_generic);
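
/*
 * Minimal usage sketch for drm_mm_insert_node_generic() (illustrative only,
 * not part of the original file; all identifiers below are hypothetical):
 * the node is embedded in the caller's object, zeroed, and handed to the
 * allocator, and drm_mm_remove_node() releases the space again.
 */
#if 0	/* example only, not compiled */
struct example_buffer {
	struct drm_mm_node node;	/* embedded; must be zeroed before insert */
};

static int example_alloc_range(struct drm_mm *mm, struct example_buffer *buf,
			       unsigned long size, unsigned alignment)
{
	/* no colouring; take the first hole that is large enough */
	return drm_mm_insert_node_generic(mm, &buf->node, size, alignment,
					  0, DRM_MM_SEARCH_DEFAULT);
}

static void example_free_range(struct example_buffer *buf)
{
	drm_mm_remove_node(&buf->node);
}
#endif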
171 
172 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
173 				       struct drm_mm_node *node,
174 				       unsigned long size, unsigned alignment,
175 				       unsigned long color,
176 				       unsigned long start, unsigned long end)
177 {
178 	struct drm_mm *mm = hole_node->mm;
179 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
180 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
181 	unsigned long adj_start = hole_start;
182 	unsigned long adj_end = hole_end;
183 
184 	BUG_ON(!hole_node->hole_follows || node->allocated);
185 
186 	if (adj_start < start)
187 		adj_start = start;
188 	if (adj_end > end)
189 		adj_end = end;
190 
191 	if (mm->color_adjust)
192 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
193 
194 	if (alignment) {
195 		unsigned tmp = adj_start % alignment;
196 		if (tmp)
197 			adj_start += alignment - tmp;
198 	}
199 
200 	if (adj_start == hole_start) {
201 		hole_node->hole_follows = 0;
202 		list_del(&hole_node->hole_stack);
203 	}
204 
205 	node->start = adj_start;
206 	node->size = size;
207 	node->mm = mm;
208 	node->color = color;
209 	node->allocated = 1;
210 
211 	INIT_LIST_HEAD(&node->hole_stack);
212 	list_add(&node->node_list, &hole_node->node_list);
213 
214 	BUG_ON(node->start + node->size > adj_end);
215 	BUG_ON(node->start + node->size > end);
216 
217 	node->hole_follows = 0;
218 	if (__drm_mm_hole_node_start(node) < hole_end) {
219 		list_add(&node->hole_stack, &mm->hole_stack);
220 		node->hole_follows = 1;
221 	}
222 }
223 
224 /**
225  * Search for free space and insert a preallocated memory node. Returns
226  * -ENOSPC if no suitable free area is available. This is for range-restricted
227  * allocations. The preallocated memory node must be zeroed by the caller.
228  */
229 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
230 					unsigned long size, unsigned alignment, unsigned long color,
231 					unsigned long start, unsigned long end,
232 					enum drm_mm_search_flags flags)
233 {
234 	struct drm_mm_node *hole_node;
235 
236 	hole_node = drm_mm_search_free_in_range_generic(mm,
237 							size, alignment, color,
238 							start, end, flags);
239 	if (!hole_node)
240 		return -ENOSPC;
241 
242 	drm_mm_insert_helper_range(hole_node, node,
243 				   size, alignment, color,
244 				   start, end);
245 	return 0;
246 }
247 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
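
/*
 * Range-restricted variant, sketched (illustrative only, hypothetical
 * identifiers): identical to the plain insert, but the resulting block is
 * guaranteed to lie inside [start, end), e.g. the CPU-mappable part of a
 * GPU aperture.
 */
#if 0	/* example only, not compiled */
static int example_alloc_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long size, unsigned long mappable_end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
						   0, mappable_end,
						   DRM_MM_SEARCH_DEFAULT);
}
#endif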
248 
249 /**
250  * Remove a memory node from the allocator; the node is only unlinked, not freed.
251  */
252 void drm_mm_remove_node(struct drm_mm_node *node)
253 {
254 	struct drm_mm *mm = node->mm;
255 	struct drm_mm_node *prev_node;
256 
257 	if (WARN_ON(!node->allocated))
258 		return;
259 
260 	BUG_ON(node->scanned_block || node->scanned_prev_free
261 				   || node->scanned_next_free);
262 
263 	prev_node =
264 	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
265 
266 	if (node->hole_follows) {
267 		BUG_ON(__drm_mm_hole_node_start(node) ==
268 		       __drm_mm_hole_node_end(node));
269 		list_del(&node->hole_stack);
270 	} else
271 		BUG_ON(__drm_mm_hole_node_start(node) !=
272 		       __drm_mm_hole_node_end(node));
273 
274 
275 	if (!prev_node->hole_follows) {
276 		prev_node->hole_follows = 1;
277 		list_add(&prev_node->hole_stack, &mm->hole_stack);
278 	} else
279 		list_move(&prev_node->hole_stack, &mm->hole_stack);
280 
281 	list_del(&node->node_list);
282 	node->allocated = 0;
283 }
284 EXPORT_SYMBOL(drm_mm_remove_node);
285 
286 static int check_free_hole(unsigned long start, unsigned long end,
287 			   unsigned long size, unsigned alignment)
288 {
289 	if (end - start < size)
290 		return 0;
291 
292 	if (alignment) {
293 		unsigned tmp = start % alignment;
294 		if (tmp)
295 			start += alignment - tmp;
296 	}
297 
298 	return end >= start + size;
299 }
300 
301 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
302 						      unsigned long size,
303 						      unsigned alignment,
304 						      unsigned long color,
305 						      enum drm_mm_search_flags flags)
306 {
307 	struct drm_mm_node *entry;
308 	struct drm_mm_node *best;
309 	unsigned long adj_start;
310 	unsigned long adj_end;
311 	unsigned long best_size;
312 
313 	BUG_ON(mm->scanned_blocks);
314 
315 	best = NULL;
316 	best_size = ~0UL;
317 
318 	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
319 		if (mm->color_adjust) {
320 			mm->color_adjust(entry, color, &adj_start, &adj_end);
321 			if (adj_end <= adj_start)
322 				continue;
323 		}
324 
325 		if (!check_free_hole(adj_start, adj_end, size, alignment))
326 			continue;
327 
328 		if (!(flags & DRM_MM_SEARCH_BEST))
329 			return entry;
330 
331 		if (entry->size < best_size) {
332 			best = entry;
333 			best_size = entry->size;
334 		}
335 	}
336 
337 	return best;
338 }
339 
340 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
341 							unsigned long size,
342 							unsigned alignment,
343 							unsigned long color,
344 							unsigned long start,
345 							unsigned long end,
346 							enum drm_mm_search_flags flags)
347 {
348 	struct drm_mm_node *entry;
349 	struct drm_mm_node *best;
350 	unsigned long adj_start;
351 	unsigned long adj_end;
352 	unsigned long best_size;
353 
354 	BUG_ON(mm->scanned_blocks);
355 
356 	best = NULL;
357 	best_size = ~0UL;
358 
359 	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
360 		if (adj_start < start)
361 			adj_start = start;
362 		if (adj_end > end)
363 			adj_end = end;
364 
365 		if (mm->color_adjust) {
366 			mm->color_adjust(entry, color, &adj_start, &adj_end);
367 			if (adj_end <= adj_start)
368 				continue;
369 		}
370 
371 		if (!check_free_hole(adj_start, adj_end, size, alignment))
372 			continue;
373 
374 		if (!(flags & DRM_MM_SEARCH_BEST))
375 			return entry;
376 
377 		if (entry->size < best_size) {
378 			best = entry;
379 			best_size = entry->size;
380 		}
381 	}
382 
383 	return best;
384 }
385 
386 /**
387  * Moves an allocation. To be used with embedded struct drm_mm_node.
388  */
389 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
390 {
391 	list_replace(&old->node_list, &new->node_list);
392 	list_replace(&old->hole_stack, &new->hole_stack);
393 	new->hole_follows = old->hole_follows;
394 	new->mm = old->mm;
395 	new->start = old->start;
396 	new->size = old->size;
397 	new->color = old->color;
398 
399 	old->allocated = 0;
400 	new->allocated = 1;
401 }
402 EXPORT_SYMBOL(drm_mm_replace_node);
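
/*
 * Sketch of drm_mm_replace_node() use (illustrative only, hypothetical
 * identifiers): hand the allocated range over from one embedded node to
 * another without a remove/insert cycle, e.g. when moving bookkeeping from a
 * temporary node into a longer-lived object.
 */
#if 0	/* example only, not compiled */
static void example_adopt_range(struct drm_mm_node *old_node,
				struct drm_mm_node *new_node)
{
	/* new_node takes over old_node's range; old_node becomes unallocated */
	drm_mm_replace_node(old_node, new_node);
}
#endif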
403 
404 /**
405  * Initialize LRU scanning.
406  *
407  * This simply sets up the scanning routines with the parameters for the desired
408  * hole.
409  *
410  * Warning: As long as the scan list is non-empty, no other operations than
411  * adding/removing nodes to/from the scan list are allowed.
412  */
413 void drm_mm_init_scan(struct drm_mm *mm,
414 		      unsigned long size,
415 		      unsigned alignment,
416 		      unsigned long color)
417 {
418 	mm->scan_color = color;
419 	mm->scan_alignment = alignment;
420 	mm->scan_size = size;
421 	mm->scanned_blocks = 0;
422 	mm->scan_hit_start = 0;
423 	mm->scan_hit_end = 0;
424 	mm->scan_check_range = 0;
425 	mm->prev_scanned_node = NULL;
426 }
427 EXPORT_SYMBOL(drm_mm_init_scan);
428 
429 /**
430  * Initialize LRU scanning.
431  *
432  * This simply sets up the scanning routines with the parameters for the desired
433  * hole. This version is for range-restricted scans.
434  *
435  * Warning: As long as the scan list is non-empty, no other operations than
436  * adding/removing nodes to/from the scan list are allowed.
437  */
438 void drm_mm_init_scan_with_range(struct drm_mm *mm,
439 				 unsigned long size,
440 				 unsigned alignment,
441 				 unsigned long color,
442 				 unsigned long start,
443 				 unsigned long end)
444 {
445 	mm->scan_color = color;
446 	mm->scan_alignment = alignment;
447 	mm->scan_size = size;
448 	mm->scanned_blocks = 0;
449 	mm->scan_hit_start = 0;
450 	mm->scan_hit_end = 0;
451 	mm->scan_start = start;
452 	mm->scan_end = end;
453 	mm->scan_check_range = 1;
454 	mm->prev_scanned_node = NULL;
455 }
456 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
457 
458 /**
459  * Add a node that might be freed to the scan list, in order to make space
460  * for the desired hole.
461  *
462  * Returns non-zero if a hole has been found, zero otherwise.
463  */
464 int drm_mm_scan_add_block(struct drm_mm_node *node)
465 {
466 	struct drm_mm *mm = node->mm;
467 	struct drm_mm_node *prev_node;
468 	unsigned long hole_start, hole_end;
469 	unsigned long adj_start, adj_end;
470 
471 	mm->scanned_blocks++;
472 
473 	BUG_ON(node->scanned_block);
474 	node->scanned_block = 1;
475 
476 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
477 			       node_list);
478 
479 	node->scanned_preceeds_hole = prev_node->hole_follows;
480 	prev_node->hole_follows = 1;
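	/*
	 * Unlink the node so that its space counts as part of the hole after
	 * prev_node; the stale prev pointer remembers where to splice it back
	 * in drm_mm_scan_remove_block(), while the next pointer chains all
	 * blocks added to the current scan via mm->prev_scanned_node.
	 */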
481 	list_del(&node->node_list);
482 	node->node_list.prev = &prev_node->node_list;
483 	node->node_list.next = &mm->prev_scanned_node->node_list;
484 	mm->prev_scanned_node = node;
485 
486 	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
487 	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
488 
489 	if (mm->scan_check_range) {
490 		if (adj_start < mm->scan_start)
491 			adj_start = mm->scan_start;
492 		if (adj_end > mm->scan_end)
493 			adj_end = mm->scan_end;
494 	}
495 
496 	if (mm->color_adjust)
497 		mm->color_adjust(prev_node, mm->scan_color,
498 				 &adj_start, &adj_end);
499 
500 	if (check_free_hole(adj_start, adj_end,
501 			    mm->scan_size, mm->scan_alignment)) {
502 		mm->scan_hit_start = hole_start;
503 		mm->scan_hit_end = hole_end;
504 		return 1;
505 	}
506 
507 	return 0;
508 }
509 EXPORT_SYMBOL(drm_mm_scan_add_block);
510 
511 /**
512  * Remove a node from the scan list.
513  *
514  * Nodes _must_ be removed in the exact same order from the scan list as they
515  * have been added, otherwise the internal state of the memory manager will be
516  * corrupted.
517  *
518  * When the scan list is empty, the selected memory nodes can be freed. An
519  * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
520  * return the just freed block (because it's at the top of the hole stack).
521  *
522  * Returns one if this block should be evicted, zero otherwise. Will always
523  * return zero when no hole has been found.
524  */
525 int drm_mm_scan_remove_block(struct drm_mm_node *node)
526 {
527 	struct drm_mm *mm = node->mm;
528 	struct drm_mm_node *prev_node;
529 
530 	mm->scanned_blocks--;
531 
532 	BUG_ON(!node->scanned_block);
533 	node->scanned_block = 0;
534 
535 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
536 			       node_list);
537 
538 	prev_node->hole_follows = node->scanned_preceeds_hole;
539 	list_add(&node->node_list, &prev_node->node_list);
540 
541 	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
542 		node->start < mm->scan_hit_end);
543 }
544 EXPORT_SYMBOL(drm_mm_scan_remove_block);
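
/*
 * Eviction-scan usage sketch (illustrative only; the buffer structure, LRU
 * handling and example_evict() are hypothetical): blocks are fed from an LRU
 * into the scan until drm_mm_scan_add_block() reports a hole, then every
 * block is removed again in exact reverse order, and only the blocks for
 * which drm_mm_scan_remove_block() returned 1 are evicted before the real
 * allocation is retried.
 */
#if 0	/* example only, not compiled */
struct example_lru_buffer {
	struct drm_mm_node node;
	struct list_head lru_link;	/* on the driver's eviction LRU */
	struct list_head scan_link;	/* temporary link for the scan roster */
};

static void example_evict(struct example_lru_buffer *buf);	/* hypothetical: unbind + drm_mm_remove_node() */

static int example_evict_something(struct drm_mm *mm, struct list_head *lru,
				   unsigned long size, unsigned alignment)
{
	struct example_lru_buffer *buf, *next;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Feed blocks from the LRU into the scan until a hole turns up. */
	list_for_each_entry(buf, lru, lru_link) {
		list_add(&buf->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&buf->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Every scanned block must be removed again in the exact reverse order
	 * it was added (list_add() prepends, so walking scan_list head-to-tail
	 * is that reverse order).  Eviction happens only once the scan list is
	 * empty again.
	 */
	list_for_each_entry_safe(buf, next, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&buf->node))
			list_move(&buf->scan_link, &evict_list);
		else
			list_del(&buf->scan_link);
	}

	if (!found)
		return -ENOSPC;

	list_for_each_entry_safe(buf, next, &evict_list, scan_link) {
		list_del(&buf->scan_link);
		example_evict(buf);
	}

	return 0;
}
#endif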
545 
546 int drm_mm_clean(struct drm_mm *mm)
547 {
548 	struct list_head *head = &mm->head_node.node_list;
549 
550 	return (head->next->next == head);
551 }
552 EXPORT_SYMBOL(drm_mm_clean);
553 
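/**
 * drm_mm_init - initialize a drm range allocator
 * @mm: allocator to initialize
 * @start: first address managed by the allocator
 * @size: size of the managed range
 *
 * The head node below is given an inverted start/size on purpose, so that the
 * single hole following it spans exactly [start, start + size); that is the
 * "clever trick" that avoids special-casing an empty allocator.
 */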
554 void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
555 {
556 	INIT_LIST_HEAD(&mm->hole_stack);
557 	mm->scanned_blocks = 0;
558 
559 	/* Clever trick to avoid a special case in the free hole tracking. */
560 	INIT_LIST_HEAD(&mm->head_node.node_list);
561 	INIT_LIST_HEAD(&mm->head_node.hole_stack);
562 	mm->head_node.hole_follows = 1;
563 	mm->head_node.scanned_block = 0;
564 	mm->head_node.scanned_prev_free = 0;
565 	mm->head_node.scanned_next_free = 0;
566 	mm->head_node.mm = mm;
567 	mm->head_node.start = start + size;
568 	mm->head_node.size = start - mm->head_node.start;
569 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
570 
571 	mm->color_adjust = NULL;
572 }
573 EXPORT_SYMBOL(drm_mm_init);
574 
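/**
 * drm_mm_takedown - tear down an allocator
 * @mm: allocator to tear down
 *
 * Nothing is freed here; this only warns if the caller left nodes allocated,
 * since every node must have been removed before takedown.
 */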
575 void drm_mm_takedown(struct drm_mm *mm)
576 {
577 	WARN(!list_empty(&mm->head_node.node_list),
578 	     "Memory manager not clean during takedown.\n");
579 }
580 EXPORT_SYMBOL(drm_mm_takedown);
581 
582 static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
583 				       const char *prefix)
584 {
585 	unsigned long hole_start, hole_end, hole_size;
586 
587 	if (entry->hole_follows) {
588 		hole_start = drm_mm_hole_node_start(entry);
589 		hole_end = drm_mm_hole_node_end(entry);
590 		hole_size = hole_end - hole_start;
591 		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
592 			prefix, hole_start, hole_end,
593 			hole_size);
594 		return hole_size;
595 	}
596 
597 	return 0;
598 }
599 
600 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
601 {
602 	struct drm_mm_node *entry;
603 	unsigned long total_used = 0, total_free = 0, total = 0;
604 
605 	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
606 
607 	drm_mm_for_each_node(entry, mm) {
608 		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
609 			prefix, entry->start, entry->start + entry->size,
610 			entry->size);
611 		total_used += entry->size;
612 		total_free += drm_mm_debug_hole(entry, prefix);
613 	}
614 	total = total_free + total_used;
615 
616 	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
617 		total_used, total_free);
618 }
619 EXPORT_SYMBOL(drm_mm_debug_table);
620 
621 #if defined(CONFIG_DEBUG_FS)
622 static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
623 {
624 	unsigned long hole_start, hole_end, hole_size;
625 
626 	if (entry->hole_follows) {
627 		hole_start = drm_mm_hole_node_start(entry);
628 		hole_end = drm_mm_hole_node_end(entry);
629 		hole_size = hole_end - hole_start;
630 		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
631 				hole_start, hole_end, hole_size);
632 		return hole_size;
633 	}
634 
635 	return 0;
636 }
637 
638 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
639 {
640 	struct drm_mm_node *entry;
641 	unsigned long total_used = 0, total_free = 0, total = 0;
642 
643 	total_free += drm_mm_dump_hole(m, &mm->head_node);
644 
645 	drm_mm_for_each_node(entry, mm) {
646 		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
647 				entry->start, entry->start + entry->size,
648 				entry->size);
649 		total_used += entry->size;
650 		total_free += drm_mm_dump_hole(m, entry);
651 	}
652 	total = total_free + total_used;
653 
654 	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
655 	return 0;
656 }
657 EXPORT_SYMBOL(drm_mm_dump_table);
658 #endif
659