xref: /openbmc/linux/drivers/gpu/drm/drm_mm.c (revision 565d76cb)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an rb-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

/*
 * Allocate a node, falling back to the cache of pre-allocated unused nodes
 * when the allocation fails (see drm_mm_pre_get() below).
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm:	memory manager we are pre-allocating for
 *
 * Tops up the cache of unused nodes so that subsequent atomic allocations
 * can be served without calling into the slab allocator.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

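/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * a caller that must carve out space while holding a spinlock tops up the
 * node cache first, then allocates with atomic = 1 so drm_mm_kmalloc()
 * falls back to the cached nodes instead of sleeping. dev_priv and its
 * mm_lock are hypothetical driver-private names:
 *
 *	struct drm_mm_node *hole, *node = NULL;
 *	int ret;
 *
 *	ret = drm_mm_pre_get(&dev_priv->mm);	// may sleep, lock not held
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	hole = drm_mm_search_free(&dev_priv->mm, size, alignment, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, alignment, 1);
 *	spin_unlock(&dev_priv->mm_lock);
 */
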
/* Start offset of the hole that follows @hole_node. */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/* End offset of that hole, i.e. the start of the next node on the list. */
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);

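/*
 * Illustrative sketch (an assumption, not part of the original file): with
 * the embedded API the caller owns the node storage, typically inside its
 * own object. struct my_buffer and its fields are made-up names:
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must be zeroed before insert
 *		void *cpu_addr;
 *	};
 *
 *	struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	if (drm_mm_insert_node(mm, &buf->node, size, alignment)) {
 *		kfree(buf);
 *		return -ENOSPC;
 *	}
 *	...
 *	drm_mm_remove_node(&buf->node);	// pairs with the insert
 *	kfree(buf);
 */
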
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

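/*
 * Example sketch (the 256 MiB limit is a made-up constraint): confine an
 * allocation to the low part of the managed range, e.g. for hardware that
 * can only address small offsets:
 *
 *	ret = drm_mm_insert_node_in_range(mm, &buf->node, size, PAGE_SIZE,
 *					  0, 256 * 1024 * 1024);
 *	if (ret)
 *		return ret;	// -ENOSPC if no hole intersects the range
 */
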
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	/* The freed range merges into the hole following prev_node. */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}

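/*
 * Worked example of the alignment math above (made-up numbers): for a hole
 * spanning [1000, 1100) with size = 64 and alignment = 64:
 *
 *	tmp    = 1000 % 64 = 40
 *	wasted = 64 - 40   = 24	(the aligned start becomes 1024)
 *
 * The request fits since 1000 + 64 + 24 = 1088 <= 1100, so the function
 * returns 1; with size = 96 it would return 0 because 1000 + 96 + 24 > 1100.
 */
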
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

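/*
 * Usage sketch (an assumed scenario, not from this file): hand an existing
 * range over to a new embedded node without a remove/insert cycle that
 * could lose the range to a concurrent allocation. old_obj and new_obj are
 * hypothetical caller structures embedding a drm_mm_node:
 *
 *	memset(&new_obj->node, 0, sizeof(new_obj->node));
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);
 *	// old_obj->node now has allocated == 0 and holds no range
 */
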
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no operations other than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no operations other than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	/* Unlink the node so its range counts as part of the hole, but keep
	 * its prev pointer so drm_mm_scan_remove_block() can relink it. */
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
			mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
			mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		/* Record the extent of the hole, not its end offset, so the
		 * containment check in drm_mm_scan_remove_block() works. */
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end - hole_start;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start and size of the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

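/*
 * Illustrative eviction loop (a sketch under assumptions: a driver-private
 * LRU list of objects, each embedding a drm_mm_node and a scratch list_head
 * named scan_link; this mirrors the pattern drivers such as i915 use, but
 * all names here are made up):
 *
 *	LIST_HEAD(unwind);
 *
 *	drm_mm_init_scan(mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &unwind);	// newest first
 *		if (drm_mm_scan_add_block(&obj->node))
 *			goto found;
 *	}
 *	// No hole found: roll back in reverse order of addition and fail.
 *	list_for_each_entry_safe(obj, next, &unwind, scan_link) {
 *		BUG_ON(drm_mm_scan_remove_block(&obj->node));
 *		list_del_init(&obj->scan_link);
 *	}
 *	return -ENOSPC;
 *
 * found:
 *	// Every scanned node must still be removed; those for which
 *	// drm_mm_scan_remove_block() returns 1 are the ones to evict.
 *	list_for_each_entry_safe(obj, next, &unwind, scan_link) {
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			evict(obj);	// hypothetical helper
 *		list_del_init(&obj->scan_link);
 *	}
 */
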
/* Returns non-zero when no nodes are allocated, i.e. the node list is empty
 * apart from the head node (the same test drm_mm_takedown() uses). */
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return list_empty(head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking:
	 * the head node's start/size wrap around so that the hole following
	 * it covers exactly [start, start + size). */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

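/*
 * Minimal lifecycle sketch (assumed driver-private embedding; the offsets
 * are arbitrary):
 *
 *	struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);	// manage offsets [0, 1 MiB)
 *	...insert and remove nodes...
 *	if (drm_mm_clean(&mm))
 *		drm_mm_takedown(&mm);		// all nodes must be gone first
 */
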
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			/* Report the hole after this entry, not the one
			 * after the head node. */
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif